repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/bin/cleanup/cleanup.py
|
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Clean up resources created by the tests.
This is intended as a tool to delete leaked resources from old tests.
Typical usage examples:
python3 tools/run_tests/xds_k8s_test_driver/bin/cleanup/cleanup.py\
--project=grpc-testing\
--network=default-vpc\
--kube_context=gke_grpc-testing_us-central1-a_psm-interop-security\
--resource_prefix='required-but-does-not-matter'\
--td_bootstrap_image='required-but-does-not-matter' --server_image='required-but-does-not-matter' --client_image='required-but-does-not-matter'
"""
import datetime
import functools
import json
import logging
import os
import re
import subprocess
from typing import Any, List
from absl import app
from absl import flags
import dateutil
from framework import xds_flags
from framework import xds_k8s_flags
from framework.infrastructure import gcp
from framework.infrastructure import k8s
from framework.infrastructure import traffic_director
from framework.test_app.runners.k8s import k8s_xds_client_runner
from framework.test_app.runners.k8s import k8s_xds_server_runner
logger = logging.getLogger(__name__)
# Alias for values parsed from gcloud's JSON output.
Json = Any
# Shorthand aliases for the k8s runner classes used by the cleanup helpers.
_KubernetesClientRunner = k8s_xds_client_runner.KubernetesClientRunner
_KubernetesServerRunner = k8s_xds_server_runner.KubernetesServerRunner
# gcloud binary; overridable through the GCLOUD environment variable.
GCLOUD = os.environ.get("GCLOUD", "gcloud")
# Per-command timeout for gcloud invocations, in seconds.
GCLOUD_CMD_TIMEOUT_S = datetime.timedelta(seconds=5).total_seconds()
# Zones used by the GCE test framework's managed instance groups.
ZONE = "us-central1-a"
SECONDARY_ZONE = "us-west1-b"
PSM_SECURITY_PREFIX = "psm-interop"  # Prefix for gke resources to delete.
URL_MAP_TEST_PREFIX = (  # Prefix for url-map test resources to delete.
    "interop-psm-url-map"
)
KEEP_PERIOD_HOURS = flags.DEFINE_integer(
    "keep_hours",
    default=168,
    help=(
        "number of hours for a resource to keep. Resources older than this will"
        " be deleted. Default is 168 (7 days)"
    ),
)
DRY_RUN = flags.DEFINE_bool(
    "dry_run",
    default=False,
    help="dry run, print resources but do not perform deletion",
)
TD_RESOURCE_PREFIXES = flags.DEFINE_list(
    "td_resource_prefixes",
    default=[PSM_SECURITY_PREFIX],
    help=(
        "a comma-separated list of prefixes for which the leaked TD resources"
        " will be deleted"
    ),
)
SERVER_PREFIXES = flags.DEFINE_list(
    "server_prefixes",
    default=[PSM_SECURITY_PREFIX],
    help=(
        "a comma-separated list of prefixes for which the leaked servers will"
        " be deleted"
    ),
)
CLIENT_PREFIXES = flags.DEFINE_list(
    "client_prefixes",
    default=[PSM_SECURITY_PREFIX, URL_MAP_TEST_PREFIX],
    help=(
        "a comma-separated list of prefixes for which the leaked clients will"
        " be deleted"
    ),
)
def load_keep_config() -> None:
    """Load keep_xds_interop_resources.json into the global KEEP_CONFIG.

    The JSON file lives next to this script and lists resource suffixes
    that must not be deleted by the cleanup rules.
    """
    global KEEP_CONFIG
    script_dir = os.path.dirname(os.path.abspath(__file__))
    json_path = os.path.realpath(
        os.path.join(script_dir, "keep_xds_interop_resources.json")
    )
    with open(json_path, "r") as config_file:
        KEEP_CONFIG = json.load(config_file)
    logging.debug(
        "Resource keep config loaded: %s", json.dumps(KEEP_CONFIG, indent=2)
    )
def is_marked_as_keep_gce(suffix: str) -> bool:
    """Whether the GCE-framework resource suffix is configured to be kept."""
    gce_keep_suffixes = KEEP_CONFIG["gce_framework"]["suffix"]
    return suffix in gce_keep_suffixes
def is_marked_as_keep_gke(suffix: str) -> bool:
    """Whether the GKE-framework resource suffix is configured to be kept."""
    gke_keep_suffixes = KEEP_CONFIG["gke_framework"]["suffix"]
    return suffix in gke_keep_suffixes
@functools.lru_cache()
def get_expire_timestamp() -> datetime.datetime:
    """Return the cutoff time: resources created before it are expired.

    Cached so all cleanup passes in one run share the same cutoff.
    """
    keep_period = datetime.timedelta(hours=KEEP_PERIOD_HOURS.value)
    now_utc = datetime.datetime.now(datetime.timezone.utc)
    return now_utc - keep_period
def exec_gcloud(project: str, *cmds: str) -> Json:
    """Run a gcloud command for the given project and return parsed JSON.

    For `list` commands, appends JSON formatting and a filter that only
    returns resources older than the keep period.

    Args:
        project: GCP project id, passed as `--project`.
        *cmds: gcloud subcommand and arguments.

    Returns:
        Parsed JSON output, or None on timeout, command failure, or when
        the command produced no output.
    """
    cmd = [GCLOUD, "--project", project, "--quiet", *cmds]
    if "list" in cmd:
        # Add arguments to shape the list output.
        cmd.extend(
            [
                "--format",
                "json",
                "--filter",
                f"creationTimestamp <= {get_expire_timestamp().isoformat()}",
            ]
        )
    logging.debug("Executing: %s", " ".join(cmd))
    # NOTE: subprocess.run reads both pipes concurrently while enforcing the
    # timeout. The previous Popen + stdout.read() approach blocked on the read
    # before wait(timeout=...) was reached, so the timeout could never fire.
    try:
        proc = subprocess.run(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=GCLOUD_CMD_TIMEOUT_S,
            check=False,
        )
    except subprocess.TimeoutExpired:
        logging.error("> Timeout executing cmd [%s]", " ".join(cmd))
        return None
    if proc.returncode:
        logging.error(
            "> Failed to execute cmd [%s], returned %d, stderr: %s",
            " ".join(cmd),
            proc.returncode,
            proc.stderr,
        )
        return None
    if proc.stdout:
        return json.loads(proc.stdout)
    return None
def remove_relative_resources_run_xds_tests(
    project: str, network: str, prefix: str, suffix: str
):
    """Removing GCP resources created by run_xds_tests.py."""
    logging.info(
        "----- Removing run_xds_tests.py resources with suffix [%s]", suffix
    )
    # Each entry is the gcloud argument list (after the project) for one
    # delete command; executed in order, frontend resources first.
    delete_commands = (
        (
            "compute",
            "forwarding-rules",
            "delete",
            f"test-forwarding-rule{suffix}",
            "--global",
        ),
        (
            "compute",
            "target-http-proxies",
            "delete",
            f"test-target-proxy{suffix}",
        ),
        (
            "alpha",
            "compute",
            "target-grpc-proxies",
            "delete",
            f"test-target-proxy{suffix}",
        ),
        ("compute", "url-maps", "delete", f"test-map{suffix}"),
        (
            "compute",
            "backend-services",
            "delete",
            f"test-backend-service{suffix}",
            "--global",
        ),
        (
            "compute",
            "backend-services",
            "delete",
            f"test-backend-service-alternate{suffix}",
            "--global",
        ),
        (
            "compute",
            "backend-services",
            "delete",
            f"test-backend-service-extra{suffix}",
            "--global",
        ),
        (
            "compute",
            "backend-services",
            "delete",
            f"test-backend-service-more-extra{suffix}",
            "--global",
        ),
        ("compute", "firewall-rules", "delete", f"test-fw-rule{suffix}"),
        ("compute", "health-checks", "delete", f"test-hc{suffix}"),
        (
            "compute",
            "instance-groups",
            "managed",
            "delete",
            f"test-ig{suffix}",
            "--zone",
            ZONE,
        ),
        (
            "compute",
            "instance-groups",
            "managed",
            "delete",
            f"test-ig-same-zone{suffix}",
            "--zone",
            ZONE,
        ),
        (
            "compute",
            "instance-groups",
            "managed",
            "delete",
            f"test-ig-secondary-zone{suffix}",
            "--zone",
            SECONDARY_ZONE,
        ),
        ("compute", "instance-templates", "delete", f"test-template{suffix}"),
    )
    for arguments in delete_commands:
        exec_gcloud(project, *arguments)
# cleanup_td creates TrafficDirectorManager (and its variants for security and
# AppNet), and then calls the cleanup() methods.
#
# Note that the variants are all based on the basic TrafficDirectorManager, so
# their `cleanup()` might do duplicate work. But deleting a non-existent
# resource returns 404, and is OK.
def cleanup_td_for_gke(project, network, resource_prefix, resource_suffix):
    """Delete Traffic Director resources created by the GKE framework.

    Instantiates the plain and secure TrafficDirectorManager variants for
    the given prefix/suffix and calls their cleanup() methods. The variants
    share a common base, so their cleanup work may overlap; deleting an
    already-deleted resource returns 404 and is tolerated.
    """
    gcp_api_manager = gcp.api.GcpApiManager()
    plain_td = traffic_director.TrafficDirectorManager(
        gcp_api_manager,
        project=project,
        network=network,
        resource_prefix=resource_prefix,
        resource_suffix=resource_suffix,
    )
    security_td = traffic_director.TrafficDirectorSecureManager(
        gcp_api_manager,
        project=project,
        network=network,
        resource_prefix=resource_prefix,
        resource_suffix=resource_suffix,
    )
    # TODO: cleanup appnet resources.
    # appnet_td = traffic_director.TrafficDirectorAppNetManager(
    #     gcp_api_manager,
    #     project=project,
    #     network=network,
    #     resource_prefix=resource_prefix,
    #     resource_suffix=resource_suffix)
    logger.info(
        "----- Removing traffic director for gke, prefix %s, suffix %s",
        resource_prefix,
        resource_suffix,
    )
    # Secure variant first, then the plain one; both force-delete.
    security_td.cleanup(force=True)
    # appnet_td.cleanup(force=True)
    plain_td.cleanup(force=True)
# cleanup_client creates a client runner, and calls its cleanup() method.
def cleanup_client(
    project,
    network,
    k8s_api_manager,
    resource_prefix,
    resource_suffix,
    gcp_service_account,
):
    """Delete a leaked xDS test client and its Kubernetes namespace.

    Builds a KubernetesClientRunner for the namespace derived from
    resource_prefix/resource_suffix and force-cleans it.
    """
    runner_kwargs = dict(
        deployment_name=xds_flags.CLIENT_NAME.value,
        image_name=xds_k8s_flags.CLIENT_IMAGE.value,
        td_bootstrap_image=xds_k8s_flags.TD_BOOTSTRAP_IMAGE.value,
        gcp_project=project,
        gcp_api_manager=gcp.api.GcpApiManager(),
        gcp_service_account=gcp_service_account,
        xds_server_uri=xds_flags.XDS_SERVER_URI.value,
        network=network,
        stats_port=xds_flags.CLIENT_PORT.value,
    )
    client_namespace = _KubernetesClientRunner.make_namespace_name(
        resource_prefix, resource_suffix
    )
    client_runner = _KubernetesClientRunner(
        k8s.KubernetesNamespace(k8s_api_manager, client_namespace),
        **runner_kwargs,
    )
    logger.info("Cleanup client")
    # force_namespace=True: presumably deletes the namespace itself as well
    # as the workloads — semantics defined by KubernetesClientRunner.
    client_runner.cleanup(force=True, force_namespace=True)
# cleanup_server creates a server runner, and calls its cleanup() method.
def cleanup_server(
    project,
    network,
    k8s_api_manager,
    resource_prefix,
    resource_suffix,
    gcp_service_account,
):
    """Delete a leaked xDS test server and its Kubernetes namespace.

    Builds a KubernetesServerRunner for the namespace derived from
    resource_prefix/resource_suffix and force-cleans it.
    """
    runner_kwargs = dict(
        deployment_name=xds_flags.SERVER_NAME.value,
        image_name=xds_k8s_flags.SERVER_IMAGE.value,
        td_bootstrap_image=xds_k8s_flags.TD_BOOTSTRAP_IMAGE.value,
        gcp_project=project,
        gcp_api_manager=gcp.api.GcpApiManager(),
        gcp_service_account=gcp_service_account,
        network=network,
    )
    server_namespace = _KubernetesServerRunner.make_namespace_name(
        resource_prefix, resource_suffix
    )
    server_runner = _KubernetesServerRunner(
        k8s.KubernetesNamespace(k8s_api_manager, server_namespace),
        **runner_kwargs,
    )
    logger.info("Cleanup server")
    # force_namespace=True: presumably deletes the namespace itself as well
    # as the workloads — semantics defined by KubernetesServerRunner.
    server_runner.cleanup(force=True, force_namespace=True)
def delete_leaked_td_resources(
    dry_run, td_resource_rules, project, network, resources
):
    """Delete leaked Traffic Director resources matching the given rules.

    Args:
        dry_run: If True, only log the resources; perform no deletion.
        td_resource_rules: Tuples of (regex, resource_prefix, keep_fn,
            remove_fn). The regex's group(1) is the resource suffix; keep_fn
            decides whether it must be preserved; remove_fn deletes it.
        project: GCP project id, forwarded to remove_fn.
        network: GCP network name, forwarded to remove_fn.
        resources: Iterable of resource dicts, each with a "name" key.
    """
    for resource in resources:
        logging.info("-----")
        logging.info("----- Cleaning up resource %s", resource["name"])
        if dry_run:
            # Skip deletion for dry-runs.
            logging.info("----- Skipped [Dry Run]: %s", resource["name"])
            continue
        matched = False
        for regex, resource_prefix, keep, remove in td_resource_rules:
            result = re.search(regex, resource["name"])
            if result is None:
                continue
            matched = True
            suffix = result.group(1)
            if keep(suffix):
                logging.info("Skipped [keep]: %s", resource["name"])
                break  # break inner loop, continue outer loop
            remove(project, network, resource_prefix, suffix)
            break
        if not matched:
            logging.info(
                "----- Skipped [does not match resource name templates]"
            )
def delete_k8s_resources(
    dry_run,
    k8s_resource_rules,
    project,
    network,
    k8s_api_manager,
    gcp_service_account,
    namespaces,
):
    """Delete expired Kubernetes namespaces that match the resource rules.

    Args:
        dry_run: If True, only log the namespaces; perform no deletion.
        k8s_resource_rules: Tuples of (regex, resource_prefix, remove_fn).
            The regex's group(1) is the resource suffix passed to remove_fn.
        project: GCP project id, forwarded to remove_fn.
        network: GCP network name, forwarded to remove_fn.
        k8s_api_manager: Shared KubernetesApiManager, forwarded to remove_fn.
        gcp_service_account: Service account, forwarded to remove_fn.
        namespaces: Kubernetes namespace objects to examine.
    """
    for ns in namespaces:
        logging.info("-----")
        logging.info("----- Cleaning up k8s namespaces %s", ns.metadata.name)
        # Guard clause: namespaces younger than the cutoff are never touched.
        if ns.metadata.creation_timestamp > get_expire_timestamp():
            logging.info("----- Skipped [resource is within expiry date]")
            continue
        if dry_run:
            # Skip deletion for dry-runs.
            logging.info("----- Skipped [Dry Run]: %s", ns.metadata.name)
            continue
        matched = False
        for regex, resource_prefix, remove in k8s_resource_rules:
            result = re.search(regex, ns.metadata.name)
            if result is None:
                continue
            matched = True
            remove(
                project,
                network,
                k8s_api_manager,
                resource_prefix,
                result.group(1),
                gcp_service_account,
            )
            break
        if not matched:
            logging.info(
                "----- Skipped [does not match resource name templates]"
            )
def find_and_remove_leaked_k8s_resources(
    dry_run, project, network, gcp_service_account
):
    """List all k8s namespaces and delete expired client/server leftovers.

    Builds name-matching rules from the CLIENT_PREFIXES and SERVER_PREFIXES
    flags, then delegates to delete_k8s_resources.
    """
    k8s_resource_rules = [
        # items in each tuple, in order
        # - regex to match
        # - prefix of the resources
        # - function to delete the resource
    ]
    for prefix in CLIENT_PREFIXES.value:
        k8s_resource_rules.append(
            (f"{prefix}-client-(.*)", prefix, cleanup_client),
        )
    for prefix in SERVER_PREFIXES.value:
        k8s_resource_rules.append(
            (f"{prefix}-server-(.*)", prefix, cleanup_server),
        )
    # Delete leaked k8s namespaces, those usually mean there are leaked testing
    # client/servers from the gke framework.
    k8s_api_manager = k8s.KubernetesApiManager(xds_k8s_flags.KUBE_CONTEXT.value)
    nss = k8s_api_manager.core.list_namespace()
    delete_k8s_resources(
        dry_run,
        k8s_resource_rules,
        project,
        network,
        k8s_api_manager,
        gcp_service_account,
        nss.items,
    )
def main(argv):
    """Entry point: find and delete expired TD, GCE, and GKE test resources."""
    if len(argv) > 1:
        raise app.UsageError("Too many command-line arguments.")
    load_keep_config()
    # Must be called before KubernetesApiManager or GcpApiManager init.
    xds_flags.set_socket_default_timeout_from_flag()
    project: str = xds_flags.PROJECT.value
    network: str = xds_flags.NETWORK.value
    gcp_service_account: str = xds_k8s_flags.GCP_SERVICE_ACCOUNT.value
    dry_run: bool = DRY_RUN.value
    td_resource_rules = [
        # items in each tuple, in order
        # - regex to match
        # - prefix of the resource (only used by gke resources)
        # - function to check if the resource should be kept
        # - function to delete the resource
        (
            r"test-hc(.*)",
            "",
            is_marked_as_keep_gce,
            remove_relative_resources_run_xds_tests,
        ),
        (
            r"test-template(.*)",
            "",
            is_marked_as_keep_gce,
            remove_relative_resources_run_xds_tests,
        ),
    ]
    for prefix in TD_RESOURCE_PREFIXES.value:
        td_resource_rules.append(
            (
                f"{prefix}-health-check-(.*)",
                prefix,
                is_marked_as_keep_gke,
                cleanup_td_for_gke,
            ),
        )
    # List resources older than KEEP_PERIOD. We only list health-checks and
    # instance templates because these are leaves in the resource dependency
    # tree.
    #
    # E.g. forwarding-rule depends on the target-proxy. So a leaked
    # forwarding-rule indicates there's a leaked target-proxy (because this
    # target proxy cannot be deleted unless the forwarding rule is deleted).
    # The leaked target-proxies are guaranteed to be a super set of leaked
    # forwarding-rules.
    compute = gcp.compute.ComputeV1(gcp.api.GcpApiManager(), project)
    # .get() guards against a response without an "items" key (no resources).
    leaked_health_checks = [
        item
        for item in compute.list_health_check().get("items", [])
        if dateutil.parser.isoparse(item["creationTimestamp"])
        <= get_expire_timestamp()
    ]
    delete_leaked_td_resources(
        dry_run, td_resource_rules, project, network, leaked_health_checks
    )
    # Delete leaked instance templates, those usually mean there are leaked VMs
    # from the gce framework. Also note that this is only needed for the gce
    # resources.
    # exec_gcloud returns None when the command fails or prints nothing;
    # fall back to an empty list so the deletion loop doesn't crash.
    leaked_instance_templates = (
        exec_gcloud(project, "compute", "instance-templates", "list") or []
    )
    delete_leaked_td_resources(
        dry_run, td_resource_rules, project, network, leaked_instance_templates
    )
    find_and_remove_leaked_k8s_resources(
        dry_run, project, network, gcp_service_account
    )


if __name__ == "__main__":
    app.run(main)
| 17,146
| 29.565062
| 147
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/api_listener_test.py
|
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from absl import flags
from absl.testing import absltest
from google.protobuf import json_format
from framework import xds_k8s_testcase
from framework import xds_url_map_testcase
from framework.helpers import skips
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_k8s_testcase)
# Type aliases
_XdsTestServer = xds_k8s_testcase.XdsTestServer
_XdsTestClient = xds_k8s_testcase.XdsTestClient
_DumpedXdsConfig = xds_url_map_testcase.DumpedXdsConfig
_Lang = skips.Lang
_TD_CONFIG_RETRY_WAIT_SEC = 2
class ApiListenerTest(xds_k8s_testcase.RegularXdsKubernetesTestCase):
    """Verifies traffic survives the deletion of one of two parallel routes.

    Creates a second url-map/target-proxy/forwarding-rule suite sharing the
    same hostname, then deletes the first suite and asserts the client hands
    off to the remaining route config without losing traffic.
    """

    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        """Skip unsupported client versions for this scenario."""
        if config.client_lang == _Lang.PYTHON:
            # gRPC Python versions prior to v1.43.x don't support handling empty
            # RDS update.
            return config.version_gte("v1.43.x")
        return True

    def test_api_listener(self) -> None:
        """Full flow: set up TD + workloads, add a duplicate route, delete one."""
        with self.subTest("00_create_health_check"):
            self.td.create_health_check()
        with self.subTest("01_create_backend_services"):
            self.td.create_backend_service()
        with self.subTest("02_create_default_url_map"):
            self.td.create_url_map(self.server_xds_host, self.server_xds_port)
        with self.subTest("03_create_default_target_proxy"):
            self.td.create_target_proxy()
        with self.subTest("04_create_default_forwarding_rule"):
            self.td.create_forwarding_rule(self.server_xds_port)
        test_server: _XdsTestServer
        with self.subTest("05_start_test_server"):
            test_server = self.startTestServers()[0]
        with self.subTest("06_add_server_backends_to_backend_services"):
            self.setupServerBackends()
        test_client: _XdsTestClient
        with self.subTest("07_start_test_client"):
            test_client = self.startTestClient(test_server)
        with self.subTest("08_test_client_xds_config_exists"):
            self.assertXdsConfigExists(test_client)
        with self.subTest("09_test_server_received_rpcs"):
            self.assertSuccessfulRpcs(test_client)
        with self.subTest("10_create_alternate_url_map"):
            self.td.create_alternative_url_map(
                self.server_xds_host,
                self.server_xds_port,
                self.td.backend_service,
            )
        # Create alternate target proxy pointing to alternate url_map with the
        # same host name in host rule. The port is fixed because they point to
        # the same backend service. Therefore we have to choose a non-`0.0.0.0`
        # ip because ip:port needs to be unique. We also have to set
        # validate_for_proxyless=false because validation requires the
        # `0.0.0.0` ip.
        # See https://github.com/grpc/grpc-java/issues/8009
        with self.subTest("11_create_alternate_target_proxy"):
            self.td.create_alternative_target_proxy()
        # Create a second suite of map+tp+fr with the same host name in host
        # rule. We set fr ip_address to be different from `0.0.0.0` and then
        # set validate_for_proxyless=false because ip:port needs to be unique.
        with self.subTest("12_create_alternate_forwarding_rule"):
            self.td.create_alternative_forwarding_rule(
                self.server_xds_port, ip_address="10.10.10.10"
            )
        with self.subTest("13_test_server_received_rpcs_with_two_url_maps"):
            self.assertSuccessfulRpcs(test_client)
            raw_config = test_client.csds.fetch_client_status(
                log_level=logging.INFO
            )
            dumped_config = _DumpedXdsConfig(
                json_format.MessageToDict(raw_config)
            )
            # Remember the RDS version so the handoff can be detected later.
            previous_route_config_version = dumped_config.rds_version
            logger.info(
                (
                    "received client config from CSDS with two url maps, "
                    "dump config: %s, rds version: %s"
                ),
                dumped_config,
                previous_route_config_version,
            )
        with self.subTest("14_delete_one_url_map_target_proxy_forwarding_rule"):
            self.td.delete_forwarding_rule()
            self.td.delete_target_grpc_proxy()
            self.td.delete_url_map()
        with self.subTest("15_test_server_continues_to_receive_rpcs"):
            self.assertRouteConfigUpdateTrafficHandoff(
                test_client,
                previous_route_config_version,
                _TD_CONFIG_RETRY_WAIT_SEC,
                xds_k8s_testcase._TD_CONFIG_MAX_WAIT_SEC,
            )


if __name__ == "__main__":
    absltest.main(failfast=True)
| 5,241
| 37.82963
| 99
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/round_robin_test.py
|
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List
from absl import flags
from absl.testing import absltest
from framework import xds_k8s_testcase
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_k8s_testcase)
# Type aliases
_XdsTestServer = xds_k8s_testcase.XdsTestServer
_XdsTestClient = xds_k8s_testcase.XdsTestClient
class RoundRobinTest(xds_k8s_testcase.RegularXdsKubernetesTestCase):
    """Checks round-robin load balancing distributes RPCs evenly."""

    def test_round_robin(self) -> None:
        """Set up TD + two server replicas, then verify per-replica RPC counts."""
        REPLICA_COUNT = 2
        with self.subTest("00_create_health_check"):
            self.td.create_health_check()
        with self.subTest("01_create_backend_services"):
            self.td.create_backend_service()
        with self.subTest("02_create_url_map"):
            self.td.create_url_map(self.server_xds_host, self.server_xds_port)
        with self.subTest("03_create_target_proxy"):
            self.td.create_target_proxy()
        with self.subTest("04_create_forwarding_rule"):
            self.td.create_forwarding_rule(self.server_xds_port)
        test_servers: List[_XdsTestServer]
        with self.subTest("05_start_test_servers"):
            test_servers = self.startTestServers(replica_count=REPLICA_COUNT)
        with self.subTest("06_add_server_backends_to_backend_services"):
            self.setupServerBackends()
        test_client: _XdsTestClient
        with self.subTest("07_start_test_client"):
            test_client = self.startTestClient(test_servers[0])
        with self.subTest("08_test_client_xds_config_exists"):
            self.assertXdsConfigExists(test_client)
        with self.subTest("09_test_server_received_rpcs_from_test_client"):
            self.assertSuccessfulRpcs(test_client)
        with self.subTest("10_round_robin"):
            num_rpcs = 100
            expected_rpcs_per_replica = num_rpcs / REPLICA_COUNT
            rpcs_by_peer = self.getClientRpcStats(
                test_client, num_rpcs
            ).rpcs_by_peer
            total_requests_received = sum(rpcs_by_peer[x] for x in rpcs_by_peer)
            self.assertEqual(
                total_requests_received, num_rpcs, "Wrong number of RPCS"
            )
            # Every replica must receive its fair share, within one RPC.
            for server in test_servers:
                hostname = server.hostname
                self.assertIn(
                    hostname,
                    rpcs_by_peer,
                    f"Server {hostname} did not receive RPCs",
                )
                self.assertLessEqual(
                    abs(rpcs_by_peer[hostname] - expected_rpcs_per_replica),
                    1,
                    f"Wrong number of RPCs for server {hostname}",
                )


if __name__ == "__main__":
    absltest.main(failfast=True)
| 3,274
| 34.215054
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/authz_test.py
|
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import time
from typing import Optional
from absl import flags
from absl.testing import absltest
import grpc
from framework import xds_k8s_testcase
from framework.helpers import skips
flags.adopt_module_key_flags(xds_k8s_testcase)
# Type aliases
_XdsTestServer = xds_k8s_testcase.XdsTestServer
_XdsTestClient = xds_k8s_testcase.XdsTestClient
_SecurityMode = xds_k8s_testcase.SecurityXdsKubernetesTestCase.SecurityMode
_Lang = skips.Lang
# The client generates QPS even when it is still loading information from xDS.
# Once it finally connects there will be an outpouring of the buffered RPCs and
# the server needs time to chew through the backlog, especially since it is
# still a new process and so probably interpreted. The server on one run
# processed 225 RPCs a second, so with the client configured for 25 qps this is
# 40 seconds worth of buffering before starting to drain the backlog.
_SETTLE_DURATION = datetime.timedelta(seconds=5)
_SAMPLE_DURATION = datetime.timedelta(seconds=0.5)
class AuthzTest(xds_k8s_testcase.SecurityXdsKubernetesTestCase):
    """Tests Traffic Director authorization (RBAC) ALLOW and DENY policies.

    Each authz rule is selected by a regex match on the `test` request
    header, so sub-tests pick a rule by sending a specific metadata value.
    """

    # Alternate between the two RPC methods so each sub-test's stats are
    # isolated from the previous one's.
    RPC_TYPE_CYCLE = {
        "UNARY_CALL": "EMPTY_CALL",
        "EMPTY_CALL": "UNARY_CALL",
    }

    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        """Skip client versions without RBAC support."""
        # Per "Authorization (RBAC)" in
        # https://github.com/grpc/grpc/blob/master/doc/grpc_xds_features.md
        if config.client_lang in _Lang.CPP | _Lang.PYTHON:
            return config.version_gte("v1.47.x")
        elif config.client_lang in _Lang.GO | _Lang.JAVA:
            return config.version_gte("v1.42.x")
        elif config.client_lang == _Lang.NODE:
            return False
        return True

    def setUp(self):
        super().setUp()
        # Next RPC method name to use; None until the first
        # configure_and_assert call picks an unused one.
        self.next_rpc_type: Optional[str] = None

    def authz_rules(self):
        """Return the authz policy rules used by every test in this class.

        Each rule's httpHeaderMatch keys off the `test` request header, so a
        sub-test triggers exactly one rule by sending the matching value.
        """
        return [
            {
                "destinations": {
                    "hosts": [f"*:{self.server_xds_port}"],
                    "ports": [self.server_port],
                    "httpHeaderMatch": {
                        "headerName": "test",
                        "regexMatch": "host-wildcard",
                    },
                },
            },
            {
                "destinations": {
                    "hosts": [f"*:{self.server_xds_port}"],
                    "ports": [self.server_port],
                    "httpHeaderMatch": {
                        "headerName": "test",
                        "regexMatch": "header-regex-a+",
                    },
                },
            },
            {
                "destinations": [
                    {
                        "hosts": [
                            f"{self.server_xds_host}:{self.server_xds_port}"
                        ],
                        "ports": [self.server_port],
                        "httpHeaderMatch": {
                            "headerName": "test",
                            "regexMatch": "host-match1",
                        },
                    },
                    {
                        "hosts": [
                            f"a-not-it.com:{self.server_xds_port}",
                            f"{self.server_xds_host}:{self.server_xds_port}",
                            "z-not-it.com:1",
                        ],
                        "ports": [1, self.server_port, 65535],
                        "httpHeaderMatch": {
                            "headerName": "test",
                            "regexMatch": "host-match2",
                        },
                    },
                ],
            },
            {
                "destinations": {
                    "hosts": [
                        f"not-the-host:{self.server_xds_port}",
                        "not-the-host",
                    ],
                    "ports": [self.server_port],
                    "httpHeaderMatch": {
                        "headerName": "test",
                        "regexMatch": "never-match-host",
                    },
                },
            },
            {
                "destinations": {
                    "hosts": [f"*:{self.server_xds_port}"],
                    "ports": [1],
                    "httpHeaderMatch": {
                        "headerName": "test",
                        "regexMatch": "never-match-port",
                    },
                },
            },
            # b/202058316. The wildcard principal is generating invalid config
            # {
            #     "sources": {
            #         "principals": ["*"],
            #     },
            #     "destinations": {
            #         "hosts": [f"*:{self.server_xds_port}"],
            #         "ports": [self.server_port],
            #         "httpHeaderMatch": {
            #             "headerName": "test",
            #             "regexMatch": "principal-present",
            #         },
            #     },
            # },
            {
                "sources": [
                    {
                        "principals": [
                            f"spiffe://{self.project}.svc.id.goog/not/the/client",
                        ],
                    },
                    {
                        "principals": [
                            f"spiffe://{self.project}.svc.id.goog/not/the/client",
                            (
                                f"spiffe://{self.project}.svc.id.goog/ns/"
                                f"{self.client_namespace}/sa/{self.client_name}"
                            ),
                        ],
                    },
                ],
                "destinations": {
                    "hosts": [f"*:{self.server_xds_port}"],
                    "ports": [self.server_port],
                    "httpHeaderMatch": {
                        "headerName": "test",
                        "regexMatch": "match-principal",
                    },
                },
            },
            {
                "sources": {
                    "principals": [
                        f"spiffe://{self.project}.svc.id.goog/not/the/client",
                    ],
                },
                "destinations": {
                    "hosts": [f"*:{self.server_xds_port}"],
                    "ports": [self.server_port],
                    "httpHeaderMatch": {
                        "headerName": "test",
                        "regexMatch": "never-match-principal",
                    },
                },
            },
        ]

    def configure_and_assert(
        self,
        test_client: _XdsTestClient,
        test_metadata_val: Optional[str],
        status_code: grpc.StatusCode,
    ) -> None:
        """Send RPCs with the given `test` header value and assert the status.

        Args:
            test_client: The running xDS test client to configure.
            test_metadata_val: Value for the `test` request header, or None to
                send no metadata.
            status_code: Expected status for RPCs sampled during the check.
        """
        # Swap method type every sub-test to avoid mixing results
        rpc_type = self.next_rpc_type
        if rpc_type is None:
            stats = test_client.get_load_balancer_accumulated_stats()
            for t in self.RPC_TYPE_CYCLE:
                if not stats.stats_per_method[t].rpcs_started:
                    rpc_type = t
            self.assertIsNotNone(rpc_type, "All RPC types already used")
        self.next_rpc_type = self.RPC_TYPE_CYCLE[rpc_type]
        metadata = None
        if test_metadata_val is not None:
            metadata = ((rpc_type, "test", test_metadata_val),)
        test_client.update_config.configure(
            rpc_types=[rpc_type], metadata=metadata
        )
        # b/228743575 Python has as race. Give us time to fix it.
        stray_rpc_limit = 1 if self.lang_spec.client_lang == _Lang.PYTHON else 0
        self.assertRpcStatusCodes(
            test_client,
            expected_status=status_code,
            duration=_SAMPLE_DURATION,
            method=rpc_type,
            stray_rpc_limit=stray_rpc_limit,
        )

    def test_plaintext_allow(self) -> None:
        """ALLOW policy over plaintext: only matching rules pass."""
        self.setupTrafficDirectorGrpc()
        self.td.create_authz_policy(action="ALLOW", rules=self.authz_rules())
        self.setupSecurityPolicies(
            server_tls=False,
            server_mtls=False,
            client_tls=False,
            client_mtls=False,
        )
        test_server: _XdsTestServer = self.startSecureTestServer()
        self.setupServerBackends()
        test_client: _XdsTestClient = self.startSecureTestClient(test_server)
        time.sleep(_SETTLE_DURATION.total_seconds())
        with self.subTest("01_host_wildcard"):
            self.configure_and_assert(
                test_client, "host-wildcard", grpc.StatusCode.OK
            )
        with self.subTest("02_no_match"):
            self.configure_and_assert(
                test_client, "no-such-rule", grpc.StatusCode.PERMISSION_DENIED
            )
            self.configure_and_assert(
                test_client, None, grpc.StatusCode.PERMISSION_DENIED
            )
        with self.subTest("03_header_regex"):
            self.configure_and_assert(
                test_client, "header-regex-a", grpc.StatusCode.OK
            )
            self.configure_and_assert(
                test_client, "header-regex-aa", grpc.StatusCode.OK
            )
            self.configure_and_assert(
                test_client, "header-regex-", grpc.StatusCode.PERMISSION_DENIED
            )
            self.configure_and_assert(
                test_client,
                "header-regex-ab",
                grpc.StatusCode.PERMISSION_DENIED,
            )
            self.configure_and_assert(
                test_client,
                "aheader-regex-a",
                grpc.StatusCode.PERMISSION_DENIED,
            )
        with self.subTest("04_host_match"):
            self.configure_and_assert(
                test_client, "host-match1", grpc.StatusCode.OK
            )
            self.configure_and_assert(
                test_client, "host-match2", grpc.StatusCode.OK
            )
        with self.subTest("05_never_match_host"):
            self.configure_and_assert(
                test_client,
                "never-match-host",
                grpc.StatusCode.PERMISSION_DENIED,
            )
        with self.subTest("06_never_match_port"):
            self.configure_and_assert(
                test_client,
                "never-match-port",
                grpc.StatusCode.PERMISSION_DENIED,
            )
        # b/202058316
        # with self.subTest('07_principal_present'):
        #     self.configure_and_assert(test_client, 'principal-present',
        #                               grpc.StatusCode.PERMISSION_DENIED)

    def test_tls_allow(self) -> None:
        """ALLOW policy over TLS (no client certs)."""
        self.setupTrafficDirectorGrpc()
        self.td.create_authz_policy(action="ALLOW", rules=self.authz_rules())
        self.setupSecurityPolicies(
            server_tls=True,
            server_mtls=False,
            client_tls=True,
            client_mtls=False,
        )
        test_server: _XdsTestServer = self.startSecureTestServer()
        self.setupServerBackends()
        test_client: _XdsTestClient = self.startSecureTestClient(test_server)
        time.sleep(_SETTLE_DURATION.total_seconds())
        with self.subTest("01_host_wildcard"):
            self.configure_and_assert(
                test_client, "host-wildcard", grpc.StatusCode.OK
            )
        with self.subTest("02_no_match"):
            self.configure_and_assert(
                test_client, None, grpc.StatusCode.PERMISSION_DENIED
            )
        # b/202058316
        # with self.subTest('03_principal_present'):
        #     self.configure_and_assert(test_client, 'principal-present',
        #                               grpc.StatusCode.PERMISSION_DENIED)

    def test_mtls_allow(self) -> None:
        """ALLOW policy over mTLS; principal-based rules become testable."""
        self.setupTrafficDirectorGrpc()
        self.td.create_authz_policy(action="ALLOW", rules=self.authz_rules())
        self.setupSecurityPolicies(
            server_tls=True, server_mtls=True, client_tls=True, client_mtls=True
        )
        test_server: _XdsTestServer = self.startSecureTestServer()
        self.setupServerBackends()
        test_client: _XdsTestClient = self.startSecureTestClient(test_server)
        time.sleep(_SETTLE_DURATION.total_seconds())
        with self.subTest("01_host_wildcard"):
            self.configure_and_assert(
                test_client, "host-wildcard", grpc.StatusCode.OK
            )
        with self.subTest("02_no_match"):
            self.configure_and_assert(
                test_client, None, grpc.StatusCode.PERMISSION_DENIED
            )
        # b/202058316
        # with self.subTest('03_principal_present'):
        #     self.configure_and_assert(test_client, 'principal-present',
        #                               grpc.StatusCode.OK)
        with self.subTest("04_match_principal"):
            self.configure_and_assert(
                test_client, "match-principal", grpc.StatusCode.OK
            )
        with self.subTest("05_never_match_principal"):
            self.configure_and_assert(
                test_client,
                "never-match-principal",
                grpc.StatusCode.PERMISSION_DENIED,
            )

    def test_plaintext_deny(self) -> None:
        """DENY policy over plaintext: matching rules are rejected."""
        self.setupTrafficDirectorGrpc()
        self.td.create_authz_policy(action="DENY", rules=self.authz_rules())
        self.setupSecurityPolicies(
            server_tls=False,
            server_mtls=False,
            client_tls=False,
            client_mtls=False,
        )
        test_server: _XdsTestServer = self.startSecureTestServer()
        self.setupServerBackends()
        test_client: _XdsTestClient = self.startSecureTestClient(test_server)
        time.sleep(_SETTLE_DURATION.total_seconds())
        with self.subTest("01_host_wildcard"):
            self.configure_and_assert(
                test_client, "host-wildcard", grpc.StatusCode.PERMISSION_DENIED
            )
        with self.subTest("02_no_match"):
            self.configure_and_assert(test_client, None, grpc.StatusCode.OK)


if __name__ == "__main__":
    absltest.main()
| 14,552
| 35.657431
| 82
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/subsetting_test.py
|
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from typing import List
from absl import flags
from absl import logging
from absl.testing import absltest
from google.protobuf import json_format
from framework import xds_k8s_testcase
from framework import xds_url_map_testcase
from framework.helpers import skips
# Re-export the framework's key flags as this module's key flags.
flags.adopt_module_key_flags(xds_k8s_testcase)
# Type aliases
_XdsTestServer = xds_k8s_testcase.XdsTestServer
_XdsTestClient = xds_k8s_testcase.XdsTestClient
# Test tunables: with 8 backends and a subset size of 4, each of the 3 clients
# must observe exactly 4 endpoints, and collectively the clients must hit more
# than 4 distinct backends — otherwise subsetting is not spreading load.
_SUBSET_SIZE = 4
_NUM_BACKENDS = 8
_NUM_CLIENTS = 3
class SubsettingTest(xds_k8s_testcase.RegularXdsKubernetesTestCase):
    """Verifies backend subsetting: each client sees only _SUBSET_SIZE
    endpoints, while different clients see (partially) different subsets."""

    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        # Subsetting is an experimental feature where most work is done on the
        # server-side. We limit it to only run on master branch to save
        # resources.
        return config.version_gte("master")

    def test_subsetting_basic(self) -> None:
        """Runs _NUM_CLIENTS clients sequentially, checks each receives
        exactly _SUBSET_SIZE endpoints, and that the union of backends hit
        exceeds _SUBSET_SIZE.

        Traffic Director resources are created in dependency order:
        health check -> backend service -> URL map -> target proxy ->
        forwarding rule.
        """
        with self.subTest("00_create_health_check"):
            self.td.create_health_check()
        with self.subTest("01_create_backend_services"):
            self.td.create_backend_service(subset_size=_SUBSET_SIZE)
        with self.subTest("02_create_url_map"):
            self.td.create_url_map(self.server_xds_host, self.server_xds_port)
        with self.subTest("03_create_target_proxy"):
            self.td.create_target_proxy()
        with self.subTest("04_create_forwarding_rule"):
            self.td.create_forwarding_rule(self.server_xds_port)
        test_servers: List[_XdsTestServer]
        with self.subTest("05_start_test_servers"):
            test_servers = self.startTestServers(replica_count=_NUM_BACKENDS)
        with self.subTest("06_add_server_backends_to_backend_services"):
            self.setupServerBackends()
        # Aggregated RPC counts keyed by server peer across all clients.
        rpc_distribution = collections.defaultdict(int)
        with self.subTest("07_start_test_client"):
            for i in range(_NUM_CLIENTS):
                # Clean created client pods if there is any.
                if self.client_runner.time_start_requested:
                    # TODO(sergiitk): Speed up by reusing the namespace.
                    self.client_runner.cleanup()
                # Create a test client
                test_client: _XdsTestClient = self.startTestClient(
                    test_servers[0]
                )
                # Validate the number of received endpoints
                config = test_client.csds.fetch_client_status(
                    log_level=logging.INFO
                )
                self.assertIsNotNone(config)
                json_config = json_format.MessageToDict(config)
                parsed = xds_url_map_testcase.DumpedXdsConfig(json_config)
                logging.info(
                    "Client %d received endpoints (len=%s): %s",
                    i,
                    len(parsed.endpoints),
                    parsed.endpoints,
                )
                self.assertLen(parsed.endpoints, _SUBSET_SIZE)
                # Record RPC stats
                lb_stats = self.getClientRpcStats(
                    test_client, _NUM_BACKENDS * 25
                )
                for key, value in lb_stats.rpcs_by_peer.items():
                    rpc_distribution[key] += value
        with self.subTest("08_log_rpc_distribution"):
            # Busiest backend first. reverse=True with a stable sort is
            # equivalent to the negated-key idiom (ties keep insertion order).
            server_entries = sorted(
                rpc_distribution.items(),
                key=lambda entry: entry[1],
                reverse=True,
            )
            # Validate if clients are receiving different sets of backends (3
            # client received a total of 4 unique backends == FAIL, a total of 5
            # unique backends == PASS)
            self.assertGreater(len(server_entries), _SUBSET_SIZE)
            logging.info(
                "RPC distribution (len=%s): %s",
                len(server_entries),
                server_entries,
            )
            peak = server_entries[0][1]
            # Generator expression is clearer than sum(map(lambda ...)).
            mean = sum(count for _, count in server_entries) / len(
                server_entries
            )
            logging.info(
                "Peak=%d Mean=%.1f Peak-to-Mean-Ratio=%.2f",
                peak,
                mean,
                peak / mean,
            )
if __name__ == "__main__":
    # failfast: abort on first failure — later subtests depend on earlier ones.
    absltest.main(failfast=True)
| 4,794
| 36.170543
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/app_net_test.py
|
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from absl import flags
from absl.testing import absltest
from framework import xds_k8s_testcase
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_k8s_testcase)
_XdsTestServer = xds_k8s_testcase.XdsTestServer
_XdsTestClient = xds_k8s_testcase.XdsTestClient
class AppNetTest(xds_k8s_testcase.AppNetXdsKubernetesTestCase):
    """Smoke test for the AppNet (mesh + gRPC route) Traffic Director setup."""

    def test_ping_pong(self):
        """Creates all TD resources, starts one server and one client, then
        asserts xDS config delivery and successful RPCs."""
        with self.subTest("0_create_health_check"):
            self.td.create_health_check()
        with self.subTest("1_create_backend_service"):
            self.td.create_backend_service()
        with self.subTest("2_create_mesh"):
            self.td.create_mesh()
        with self.subTest("3_create_grpc_route"):
            self.td.create_grpc_route(
                self.server_xds_host, self.server_xds_port
            )
        server: _XdsTestServer
        with self.subTest("4_start_test_server"):
            server = self.startTestServers(replica_count=1)[0]
        with self.subTest("5_setup_server_backends"):
            self.setupServerBackends()
        client: _XdsTestClient
        with self.subTest("6_start_test_client"):
            mesh_name = self.td.mesh.name
            client = self.startTestClient(server, config_mesh=mesh_name)
        with self.subTest("7_assert_xds_config_exists"):
            self.assertXdsConfigExists(client)
        with self.subTest("8_assert_successful_rpcs"):
            self.assertSuccessfulRpcs(client)
if __name__ == "__main__":
    # failfast: abort on first failure — later subtests depend on earlier ones.
    absltest.main(failfast=True)
| 2,141
| 31.454545
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/remove_neg_test.py
|
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List
from absl import flags
from absl.testing import absltest
from framework import xds_k8s_testcase
from framework.infrastructure import k8s
from framework.test_app.runners.k8s import k8s_xds_server_runner
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_k8s_testcase)
# Type aliases
_XdsTestServer = xds_k8s_testcase.XdsTestServer
_XdsTestClient = xds_k8s_testcase.XdsTestClient
_KubernetesServerRunner = k8s_xds_server_runner.KubernetesServerRunner
class RemoveNegTest(xds_k8s_testcase.RegularXdsKubernetesTestCase):
    """Verifies that after a NEG is removed from the backend service, the
    client stops sending RPCs to its backends and keeps using the rest."""

    def setUp(self):
        """Creates a second server runner in the same namespace so an extra
        deployment (and its NEG) can be added, then removed mid-test."""
        super().setUp()
        self.alternate_server_runner = _KubernetesServerRunner(
            k8s.KubernetesNamespace(
                self.k8s_api_manager, self.server_namespace
            ),
            # "-alt" suffix keeps this deployment distinct from the default.
            deployment_name=self.server_name + "-alt",
            image_name=self.server_image,
            gcp_service_account=self.gcp_service_account,
            td_bootstrap_image=self.td_bootstrap_image,
            gcp_project=self.project,
            gcp_api_manager=self.gcp_api_manager,
            xds_server_uri=self.xds_server_uri,
            network=self.network,
            debug_use_port_forwarding=self.debug_use_port_forwarding,
            # Shares the namespace with the default server runner.
            reuse_namespace=True,
        )
    def cleanup(self):
        """Cleans up base resources, then the alternate runner (if created)."""
        super().cleanup()
        # setUp may have failed before the runner was created; hasattr guard
        # keeps cleanup itself from raising.
        if hasattr(self, "alternate_server_runner"):
            self.alternate_server_runner.cleanup(
                force=self.force_cleanup, force_namespace=self.force_cleanup
            )
    def test_remove_neg(self) -> None:
        """Starts two server groups, confirms RPCs reach both, removes the
        alternate NEG, then confirms RPCs converge back to the default set."""
        with self.subTest("00_create_health_check"):
            self.td.create_health_check()
        with self.subTest("01_create_backend_services"):
            self.td.create_backend_service()
        with self.subTest("02_create_url_map"):
            self.td.create_url_map(self.server_xds_host, self.server_xds_port)
        with self.subTest("03_create_target_proxy"):
            self.td.create_target_proxy()
        with self.subTest("04_create_forwarding_rule"):
            self.td.create_forwarding_rule(self.server_xds_port)
        default_test_servers: List[_XdsTestServer]
        same_zone_test_servers: List[_XdsTestServer]
        with self.subTest("05_start_test_servers"):
            default_test_servers = self.startTestServers()
            same_zone_test_servers = self.startTestServers(
                server_runner=self.alternate_server_runner
            )
        with self.subTest("06_add_server_backends_to_backend_services"):
            # Both the default and the alternate deployments' NEGs are added.
            self.setupServerBackends()
            self.setupServerBackends(server_runner=self.alternate_server_runner)
        test_client: _XdsTestClient
        with self.subTest("07_start_test_client"):
            test_client = self.startTestClient(default_test_servers[0])
        with self.subTest("08_test_client_xds_config_exists"):
            self.assertXdsConfigExists(test_client)
        with self.subTest("09_test_server_received_rpcs_from_test_client"):
            self.assertSuccessfulRpcs(test_client)
        with self.subTest("10_remove_neg"):
            # First confirm traffic reaches ALL servers, then remove the
            # alternate NEG and confirm traffic shrinks to the default set.
            self.assertRpcsEventuallyGoToGivenServers(
                test_client, default_test_servers + same_zone_test_servers
            )
            self.removeServerBackends(
                server_runner=self.alternate_server_runner
            )
            self.assertRpcsEventuallyGoToGivenServers(
                test_client, default_test_servers
            )
if __name__ == "__main__":
    # failfast: abort on first failure — later subtests depend on earlier ones.
    absltest.main(failfast=True)
| 4,131
| 36.225225
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/change_backend_service_test.py
|
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List
from absl import flags
from absl.testing import absltest
from framework import xds_k8s_testcase
from framework.infrastructure import k8s
from framework.test_app.runners.k8s import k8s_xds_server_runner
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_k8s_testcase)
# Type aliases
_XdsTestServer = xds_k8s_testcase.XdsTestServer
_XdsTestClient = xds_k8s_testcase.XdsTestClient
_KubernetesServerRunner = k8s_xds_server_runner.KubernetesServerRunner
class ChangeBackendServiceTest(xds_k8s_testcase.RegularXdsKubernetesTestCase):
    """Verifies that after the URL map is patched to an alternative backend
    service, client traffic switches to the alternative service's backends."""

    def setUp(self):
        """Creates a second server runner (same namespace) whose backends are
        attached to the *alternative* backend service."""
        super().setUp()
        self.alternate_k8s_namespace = k8s.KubernetesNamespace(
            self.k8s_api_manager, self.server_namespace
        )
        self.alternate_server_runner = _KubernetesServerRunner(
            self.alternate_k8s_namespace,
            # "-alt" suffix keeps this deployment distinct from the default.
            deployment_name=self.server_name + "-alt",
            image_name=self.server_image,
            gcp_service_account=self.gcp_service_account,
            td_bootstrap_image=self.td_bootstrap_image,
            gcp_project=self.project,
            gcp_api_manager=self.gcp_api_manager,
            xds_server_uri=self.xds_server_uri,
            network=self.network,
            debug_use_port_forwarding=self.debug_use_port_forwarding,
            # Shares the namespace with the default server runner.
            reuse_namespace=True,
        )
    def cleanup(self):
        """Cleans up base resources, then the alternate runner (if created)."""
        super().cleanup()
        # setUp may have failed before the runner was created; hasattr guard
        # keeps cleanup itself from raising.
        if hasattr(self, "alternate_server_runner"):
            self.alternate_server_runner.cleanup(
                force=self.force_cleanup, force_namespace=self.force_cleanup
            )
    def test_change_backend_service(self) -> None:
        """Routes to the default backend service, then patches the URL map to
        the alternative service and asserts traffic moves accordingly."""
        with self.subTest("00_create_health_check"):
            self.td.create_health_check()
        with self.subTest("01_create_backend_services"):
            self.td.create_backend_service()
            self.td.create_alternative_backend_service()
        with self.subTest("02_create_url_map"):
            self.td.create_url_map(self.server_xds_host, self.server_xds_port)
        with self.subTest("03_create_target_proxy"):
            self.td.create_target_proxy()
        with self.subTest("04_create_forwarding_rule"):
            self.td.create_forwarding_rule(self.server_xds_port)
        default_test_servers: List[_XdsTestServer]
        same_zone_test_servers: List[_XdsTestServer]
        with self.subTest("05_start_test_servers"):
            default_test_servers = self.startTestServers()
            same_zone_test_servers = self.startTestServers(
                server_runner=self.alternate_server_runner
            )
        with self.subTest("06_add_server_backends_to_backend_services"):
            self.setupServerBackends()
            # Add backend to alternative backend service
            (
                neg_name_alt,
                neg_zones_alt,
            ) = self.alternate_k8s_namespace.get_service_neg(
                self.alternate_server_runner.service_name, self.server_port
            )
            self.td.alternative_backend_service_add_neg_backends(
                neg_name_alt, neg_zones_alt
            )
        test_client: _XdsTestClient
        with self.subTest("07_start_test_client"):
            test_client = self.startTestClient(default_test_servers[0])
        with self.subTest("08_test_client_xds_config_exists"):
            self.assertXdsConfigExists(test_client)
        with self.subTest("09_test_server_received_rpcs_from_test_client"):
            self.assertSuccessfulRpcs(test_client)
        with self.subTest("10_change_backend_service"):
            # Repoint the URL map at the alternative backend service; traffic
            # should converge onto the alternate deployment's servers.
            self.td.patch_url_map(
                self.server_xds_host,
                self.server_xds_port,
                self.td.alternative_backend_service,
            )
            self.assertRpcsEventuallyGoToGivenServers(
                test_client, same_zone_test_servers
            )
if __name__ == "__main__":
    # failfast: abort on first failure — later subtests depend on earlier ones.
    absltest.main(failfast=True)
| 4,534
| 36.479339
| 78
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/baseline_test.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from absl import flags
from absl.testing import absltest
from framework import xds_k8s_testcase
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_k8s_testcase)
# Type aliases
_XdsTestServer = xds_k8s_testcase.XdsTestServer
_XdsTestClient = xds_k8s_testcase.XdsTestClient
class BaselineTest(xds_k8s_testcase.RegularXdsKubernetesTestCase):
    """Baseline happy-path xDS test: full Traffic Director setup, one server,
    one client, asserting config delivery and successful RPCs."""

    def test_traffic_director_grpc_setup(self):
        with self.subTest("0_create_health_check"):
            self.td.create_health_check()
        with self.subTest("1_create_backend_service"):
            self.td.create_backend_service()
        with self.subTest("2_create_url_map"):
            self.td.create_url_map(self.server_xds_host, self.server_xds_port)
        with self.subTest("3_create_target_proxy"):
            self.td.create_target_proxy()
        with self.subTest("4_create_forwarding_rule"):
            self.td.create_forwarding_rule(self.server_xds_port)
        server: _XdsTestServer
        with self.subTest("5_start_test_server"):
            servers = self.startTestServers()
            server = servers[0]
        with self.subTest("6_add_server_backends_to_backend_service"):
            self.setupServerBackends()
        client: _XdsTestClient
        with self.subTest("7_start_test_client"):
            client = self.startTestClient(server)
        with self.subTest("8_test_client_xds_config_exists"):
            self.assertXdsConfigExists(client)
        with self.subTest("9_test_server_received_rpcs_from_test_client"):
            self.assertSuccessfulRpcs(client)
if __name__ == "__main__":
    # failfast: abort on first failure — later subtests depend on earlier ones.
    absltest.main(failfast=True)
| 2,204
| 33.453125
| 78
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/outlier_detection_test.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List
from absl import flags
from absl.testing import absltest
from framework import xds_k8s_flags
from framework import xds_k8s_testcase
from framework import xds_url_map_testcase
from framework.helpers import skips
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_k8s_testcase)
flags.mark_flag_as_required("server_image_canonical")
# Type aliases
RpcTypeUnaryCall = xds_url_map_testcase.RpcTypeUnaryCall
RpcTypeEmptyCall = xds_url_map_testcase.RpcTypeEmptyCall
_XdsTestServer = xds_k8s_testcase.XdsTestServer
_XdsTestClient = xds_k8s_testcase.XdsTestClient
_Lang = skips.Lang
# Testing consts
# Client QPS — presumably chosen high enough to satisfy the outlier
# detection successRateRequestVolume (20) per 2s interval quickly; confirm
# against the backend service config in the test body.
_QPS = 100
_REPLICA_COUNT = 5
class OutlierDetectionTest(xds_k8s_testcase.RegularXdsKubernetesTestCase):
    """
    Implementation of https://github.com/grpc/grpc/blob/master/doc/xds-test-descriptions.md#outlier_detection
    This test verifies that the client applies the outlier detection
    configuration and temporarily drops traffic to a server that fails
    requests.
    """
    @classmethod
    def setUpClass(cls):
        """Force the java test server for languages not yet supporting
        the `rpc-behavior` feature.
        https://github.com/grpc/grpc/blob/master/doc/xds-test-descriptions.md#server
        """
        super().setUpClass()
        # gRPC Java implemented server "error-code-" rpc-behavior in v1.47.x.
        if cls.lang_spec.client_lang == _Lang.JAVA:
            return
        # gRPC CPP implemented server "hostname" rpc-behavior in v1.57.x,
        # see https://github.com/grpc/grpc/pull/33446.
        if (
            cls.lang_spec.client_lang == _Lang.CPP
            and cls.lang_spec.version_gte("v1.57.x")
        ):
            return
        # gRPC go, python and node fallback to the gRPC Java.
        # TODO(https://github.com/grpc/grpc-go/issues/6288): use go server.
        # TODO(https://github.com/grpc/grpc/issues/33134): use python server.
        cls.server_image = xds_k8s_flags.SERVER_IMAGE_CANONICAL.value
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        """Returns whether the client under test supports outlier detection.

        Minimum versions differ per language; unsupported languages fall
        through to False.
        """
        if config.client_lang in _Lang.CPP | _Lang.PYTHON:
            return config.version_gte("v1.48.x")
        if config.client_lang == _Lang.JAVA:
            return config.version_gte("v1.49.x")
        if config.client_lang == _Lang.NODE:
            return config.version_gte("v1.6.x")
        if config.client_lang == _Lang.GO:
            # TODO(zasweq): Update when the feature makes in a version branch.
            return config.version_gte("master")
        return False
    def test_outlier_detection(self) -> None:
        """Makes one server fail unary calls via the rpc-behavior metadata,
        asserts it is ejected, then asserts it returns once failures stop."""
        with self.subTest("00_create_health_check"):
            self.td.create_health_check()
        with self.subTest("01_create_backend_service"):
            # Eject on success-rate: evaluate every 2s once 20 requests
            # per host have been observed in the interval.
            self.td.create_backend_service(
                outlier_detection={
                    "interval": {"seconds": 2, "nanos": 0},
                    "successRateRequestVolume": 20,
                }
            )
        with self.subTest("02_create_url_map"):
            self.td.create_url_map(self.server_xds_host, self.server_xds_port)
        with self.subTest("03_create_target_proxy"):
            self.td.create_target_proxy()
        with self.subTest("04_create_forwarding_rule"):
            self.td.create_forwarding_rule(self.server_xds_port)
        test_servers: List[_XdsTestServer]
        with self.subTest("05_start_test_servers"):
            test_servers = self.startTestServers(replica_count=_REPLICA_COUNT)
        with self.subTest("06_add_server_backends_to_backend_services"):
            self.setupServerBackends()
        test_client: _XdsTestClient
        with self.subTest("07_start_test_client"):
            test_client = self.startTestClient(test_servers[0], qps=_QPS)
        with self.subTest("08_test_client_xds_config_exists"):
            self.assertXdsConfigExists(test_client)
        with self.subTest("09_test_servers_received_rpcs_from_test_client"):
            self.assertRpcsEventuallyGoToGivenServers(test_client, test_servers)
        rpc_types = (RpcTypeUnaryCall,)
        with self.subTest("10_chosen_server_removed_by_outlier_detection"):
            # rpc-behavior header instructs server[0] (matched by hostname)
            # to fail every unary call with error code 2 — it should then be
            # ejected, leaving traffic to the remaining servers.
            test_client.update_config.configure(
                rpc_types=rpc_types,
                metadata=(
                    (
                        RpcTypeUnaryCall,
                        "rpc-behavior",
                        f"hostname={test_servers[0].hostname} error-code-2",
                    ),
                ),
            )
            self.assertRpcsEventuallyGoToGivenServers(
                test_client, test_servers[1:]
            )
        with self.subTest("11_ejected_server_returned_after_failures_stopped"):
            # Clearing the metadata stops the induced failures; the ejected
            # server should rejoin the serving set.
            test_client.update_config.configure(rpc_types=rpc_types)
            self.assertRpcsEventuallyGoToGivenServers(test_client, test_servers)
if __name__ == "__main__":
    # failfast: abort on first failure — later subtests depend on earlier ones.
    absltest.main(failfast=True)
| 5,539
| 36.181208
| 109
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/security_test.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from absl import flags
from absl.testing import absltest
from framework import xds_k8s_testcase
from framework.helpers import rand
from framework.helpers import skips
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_k8s_testcase)
# Type aliases
_XdsTestServer = xds_k8s_testcase.XdsTestServer
_XdsTestClient = xds_k8s_testcase.XdsTestClient
_SecurityMode = xds_k8s_testcase.SecurityXdsKubernetesTestCase.SecurityMode
_Lang = skips.Lang
class SecurityTest(xds_k8s_testcase.SecurityXdsKubernetesTestCase):
    """Positive and negative PSM Security (mTLS/TLS/plaintext) tests."""

    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        if config.client_lang in (
            _Lang.CPP | _Lang.GO | _Lang.JAVA | _Lang.PYTHON
        ):
            # Versions prior to v1.41.x don't support PSM Security.
            # https://github.com/grpc/grpc/blob/master/doc/grpc_xds_features.md
            return config.version_gte("v1.41.x")
        elif config.client_lang == _Lang.NODE:
            return False
        return True
    def test_mtls(self):
        """mTLS test.
        Both client and server configured to use TLS and mTLS.
        """
        self.setupTrafficDirectorGrpc()
        self.setupSecurityPolicies(
            server_tls=True, server_mtls=True, client_tls=True, client_mtls=True
        )
        test_server: _XdsTestServer = self.startSecureTestServer()
        self.setupServerBackends()
        test_client: _XdsTestClient = self.startSecureTestClient(test_server)
        self.assertTestAppSecurity(_SecurityMode.MTLS, test_client, test_server)
        self.assertSuccessfulRpcs(test_client)
        logger.info("[SUCCESS] mTLS security mode confirmed.")
    def test_tls(self):
        """TLS test.
        Both client and server configured to use TLS and not use mTLS.
        """
        self.setupTrafficDirectorGrpc()
        self.setupSecurityPolicies(
            server_tls=True,
            server_mtls=False,
            client_tls=True,
            client_mtls=False,
        )
        test_server: _XdsTestServer = self.startSecureTestServer()
        self.setupServerBackends()
        test_client: _XdsTestClient = self.startSecureTestClient(test_server)
        self.assertTestAppSecurity(_SecurityMode.TLS, test_client, test_server)
        self.assertSuccessfulRpcs(test_client)
        logger.info("[SUCCESS] TLS security mode confirmed.")
    def test_plaintext_fallback(self):
        """Plain-text fallback test.
        Control plane provides no security config so both client and server
        fallback to plaintext based on fallback-credentials.
        """
        self.setupTrafficDirectorGrpc()
        self.setupSecurityPolicies(
            server_tls=False,
            server_mtls=False,
            client_tls=False,
            client_mtls=False,
        )
        test_server: _XdsTestServer = self.startSecureTestServer()
        self.setupServerBackends()
        test_client: _XdsTestClient = self.startSecureTestClient(test_server)
        self.assertTestAppSecurity(
            _SecurityMode.PLAINTEXT, test_client, test_server
        )
        self.assertSuccessfulRpcs(test_client)
        logger.info("[SUCCESS] Plaintext security mode confirmed.")
    def test_mtls_error(self):
        """Negative test: mTLS Error.
        Server expects client mTLS cert, but client configured only for TLS.
        Note: because this is a negative test we need to make sure the mTLS
        failure happens after receiving the correct configuration at the
        client. To ensure that we will perform the following steps in that
        sequence:
        - Creation of a backendService, and attaching the backend (NEG)
        - Creation of the Server mTLS Policy, and attaching to the ECS
        - Creation of the Client TLS Policy, and attaching to the backendService
        - Creation of the urlMap, targetProxy, and forwardingRule
        With this sequence we are sure that when the client receives the
        endpoints of the backendService the security-config would also have
        been received as confirmed by the TD team.
        """
        # Create backend service
        self.td.setup_backend_for_grpc(
            health_check_port=self.server_maintenance_port
        )
        # Start server and attach its NEGs to the backend service, but
        # don't wait until they become healthy.
        test_server: _XdsTestServer = self.startSecureTestServer()
        self.setupServerBackends(wait_for_healthy_status=False)
        # Setup policies and attach them.
        self.setupSecurityPolicies(
            server_tls=True,
            server_mtls=True,
            client_tls=True,
            client_mtls=False,
        )
        # Create the routing rule map.
        self.td.setup_routing_rule_map_for_grpc(
            self.server_xds_host, self.server_xds_port
        )
        # Now that TD setup is complete, Backend Service can be populated
        # with healthy backends (NEGs).
        self.td.wait_for_backends_healthy_status()
        # Start the client, but don't wait for it to report a healthy channel.
        test_client: _XdsTestClient = self.startSecureTestClient(
            test_server, wait_for_active_server_channel=False
        )
        self.assertClientCannotReachServerRepeatedly(test_client)
        logger.info(
            "[SUCCESS] Client's connectivity state is consistent with a mTLS "
            "error caused by not presenting mTLS certificate to the server."
        )
    def test_server_authz_error(self):
        """Negative test: AuthZ error.
        Client does not authorize server because of mismatched SAN name.
        The order of operations is the same as in `test_mtls_error`.
        """
        # Create backend service
        self.td.setup_backend_for_grpc(
            health_check_port=self.server_maintenance_port
        )
        # Start server and attach its NEGs to the backend service, but
        # don't wait until they become healthy.
        test_server: _XdsTestServer = self.startSecureTestServer()
        self.setupServerBackends(wait_for_healthy_status=False)
        # Regular TLS setup, but with client policy configured using
        # intentionally incorrect server_namespace.
        self.td.setup_server_security(
            server_namespace=self.server_namespace,
            server_name=self.server_name,
            server_port=self.server_port,
            tls=True,
            mtls=False,
        )
        # Random suffix avoids accidentally matching a real namespace.
        incorrect_namespace = f"incorrect-namespace-{rand.rand_string()}"
        self.td.setup_client_security(
            server_namespace=incorrect_namespace,
            server_name=self.server_name,
            tls=True,
            mtls=False,
        )
        # Create the routing rule map.
        self.td.setup_routing_rule_map_for_grpc(
            self.server_xds_host, self.server_xds_port
        )
        # Now that TD setup is complete, Backend Service can be populated
        # with healthy backends (NEGs).
        self.td.wait_for_backends_healthy_status()
        # Start the client, but don't wait for it to report a healthy channel.
        test_client: _XdsTestClient = self.startSecureTestClient(
            test_server, wait_for_active_server_channel=False
        )
        self.assertClientCannotReachServerRepeatedly(test_client)
        logger.info(
            "[SUCCESS] Client's connectivity state is consistent with "
            "AuthZ error caused by server presenting incorrect SAN."
        )
if __name__ == "__main__":
    # Run via absl's test runner so absl flags are parsed before tests start.
    absltest.main()
| 8,136
| 35.986364
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/affinity_test.py
|
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from typing import List
from absl import flags
from absl.testing import absltest
from google.protobuf import json_format
from framework import xds_k8s_flags
from framework import xds_k8s_testcase
from framework import xds_url_map_testcase
from framework.helpers import skips
from framework.rpc import grpc_channelz
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_k8s_testcase)
# Type aliases
_XdsTestServer = xds_k8s_testcase.XdsTestServer
_XdsTestClient = xds_k8s_testcase.XdsTestClient
_ChannelzChannelState = grpc_channelz.ChannelState
_Lang = skips.Lang
# Testing consts
# Metadata/header key the backend service hashes on for session affinity.
_TEST_AFFINITY_METADATA_KEY = "xds_md"
# Poll interval and overall deadline (seconds) when waiting for Traffic
# Director to propagate backend health changes to the client.
_TD_PROPAGATE_CHECK_INTERVAL_SEC = 10
_TD_PROPAGATE_TIMEOUT = 600
_REPLICA_COUNT = 3
_RPC_COUNT = 100
class AffinityTest(xds_k8s_testcase.RegularXdsKubernetesTestCase):
    """Verifies header-based session affinity (RING_HASH): all RPCs with the
    same affinity header stick to one backend, and move to a single different
    backend once the chosen one stops serving."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Force the python client to use the reference server image (Java)
        # because the python server doesn't yet support set_not_serving RPC.
        # TODO(https://github.com/grpc/grpc/issues/30635): Remove when resolved.
        if cls.lang_spec.client_lang == _Lang.PYTHON:
            cls.server_image = xds_k8s_flags.SERVER_IMAGE_CANONICAL.value
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        if config.client_lang in _Lang.CPP | _Lang.JAVA:
            return config.version_gte("v1.40.x")
        elif config.client_lang == _Lang.GO:
            return config.version_gte("v1.41.x")
        elif config.client_lang == _Lang.PYTHON:
            # TODO(https://github.com/grpc/grpc/issues/27430): supported after
            # the issue is fixed.
            return False
        elif config.client_lang == _Lang.NODE:
            return False
        return True
    def test_affinity(self) -> None:  # pylint: disable=too-many-statements
        with self.subTest("00_create_health_check"):
            self.td.create_health_check()
        with self.subTest("01_create_backend_services"):
            self.td.create_backend_service(
                affinity_header=_TEST_AFFINITY_METADATA_KEY
            )
        with self.subTest("02_create_url_map"):
            self.td.create_url_map(self.server_xds_host, self.server_xds_port)
        with self.subTest("03_create_target_proxy"):
            self.td.create_target_proxy()
        with self.subTest("04_create_forwarding_rule"):
            self.td.create_forwarding_rule(self.server_xds_port)
        test_servers: List[_XdsTestServer]
        with self.subTest("05_start_test_servers"):
            test_servers = self.startTestServers(replica_count=_REPLICA_COUNT)
        with self.subTest("06_add_server_backends_to_backend_services"):
            self.setupServerBackends()
        test_client: _XdsTestClient
        with self.subTest("07_start_test_client"):
            # Client sends EmptyCall with a fixed affinity header value so
            # every RPC hashes to the same backend.
            test_client = self.startTestClient(
                test_servers[0],
                rpc="EmptyCall",
                metadata="EmptyCall:%s:123" % _TEST_AFFINITY_METADATA_KEY,
            )
            # Validate the number of received endpoints and affinity configs.
            config = test_client.csds.fetch_client_status(
                log_level=logging.INFO
            )
            self.assertIsNotNone(config)
            json_config = json_format.MessageToDict(config)
            parsed = xds_url_map_testcase.DumpedXdsConfig(json_config)
            logging.info("Client received CSDS response: %s", parsed)
            self.assertLen(parsed.endpoints, _REPLICA_COUNT)
            self.assertEqual(
                parsed.rds["virtualHosts"][0]["routes"][0]["route"][
                    "hashPolicy"
                ][0]["header"]["headerName"],
                _TEST_AFFINITY_METADATA_KEY,
            )
            self.assertEqual(parsed.cds[0]["lbPolicy"], "RING_HASH")
        with self.subTest("08_test_client_xds_config_exists"):
            self.assertXdsConfigExists(test_client)
        with self.subTest("09_test_server_received_rpcs_from_test_client"):
            self.assertSuccessfulRpcs(test_client)
        with self.subTest("10_first_100_affinity_rpcs_pick_same_backend"):
            rpc_stats = self.getClientRpcStats(test_client, _RPC_COUNT)
            json_lb_stats = json_format.MessageToDict(rpc_stats)
            rpc_distribution = xds_url_map_testcase.RpcDistributionStats(
                json_lb_stats
            )
            self.assertEqual(1, rpc_distribution.num_peers)
            # Check subchannel states.
            # One should be READY.
            ready_channels = test_client.find_subchannels_with_state(
                _ChannelzChannelState.READY
            )
            self.assertLen(
                ready_channels,
                1,
                msg=(
                    "(AffinityTest) The client expected to have one READY"
                    " subchannel to one of the test servers. Found"
                    f" {len(ready_channels)} instead."
                ),
            )
            # Any remaining subchannels may be in any state.
            # Remember the backend inuse, and turn it down later.
            first_backend_inuse = list(
                rpc_distribution.raw["rpcsByPeer"].keys()
            )[0]
        with self.subTest("11_turn_down_server_in_use"):
            for server in test_servers:
                if server.hostname == first_backend_inuse:
                    server.set_not_serving()
        # Label typo fixed: was "12_wait_for_unhealth_status_propagation".
        with self.subTest("12_wait_for_unhealthy_status_propagation"):
            deadline = time.time() + _TD_PROPAGATE_TIMEOUT
            parsed = None
            try:
                while time.time() < deadline:
                    config = test_client.csds.fetch_client_status(
                        log_level=logging.INFO
                    )
                    self.assertIsNotNone(config)
                    json_config = json_format.MessageToDict(config)
                    parsed = xds_url_map_testcase.DumpedXdsConfig(json_config)
                    if len(parsed.endpoints) == _REPLICA_COUNT - 1:
                        break
                    logging.info(
                        (
                            "CSDS got unexpected endpoints, will retry after %d"
                            " seconds"
                        ),
                        _TD_PROPAGATE_CHECK_INTERVAL_SEC,
                    )
                    time.sleep(_TD_PROPAGATE_CHECK_INTERVAL_SEC)
                else:
                    # while/else: reached only if the deadline expired without
                    # ever observing the reduced endpoint count.
                    # Message now tracks the constant instead of hardcoding 600.
                    self.fail(
                        "unhealthy status did not propagate after"
                        f" {_TD_PROPAGATE_TIMEOUT} seconds"
                    )
            finally:
                logging.info("Client received CSDS response: %s", parsed)
        # Step renumbered to 13: the original label duplicated "12_...".
        with self.subTest("13_next_100_affinity_rpcs_pick_different_backend"):
            rpc_stats = self.getClientRpcStats(test_client, _RPC_COUNT)
            json_lb_stats = json_format.MessageToDict(rpc_stats)
            rpc_distribution = xds_url_map_testcase.RpcDistributionStats(
                json_lb_stats
            )
            self.assertEqual(1, rpc_distribution.num_peers)
            new_backend_inuse = list(rpc_distribution.raw["rpcsByPeer"].keys())[
                0
            ]
            self.assertNotEqual(new_backend_inuse, first_backend_inuse)
if __name__ == "__main__":
    # failfast: abort on first failure — later subtests depend on earlier ones.
    absltest.main(failfast=True)
| 7,987
| 38.544554
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/__init__.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 577
| 40.285714
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/custom_lb_test.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
from absl import flags
from absl.testing import absltest
import grpc
from framework import xds_k8s_flags
from framework import xds_k8s_testcase
from framework.helpers import skips
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_k8s_testcase)
# Type aliases
_XdsTestServer = xds_k8s_testcase.XdsTestServer
_XdsTestClient = xds_k8s_testcase.XdsTestClient
_Lang = skips.Lang
_EXPECTED_STATUS = grpc.StatusCode.DATA_LOSS
class CustomLbTest(xds_k8s_testcase.RegularXdsKubernetesTestCase):
    """Tests Traffic Director custom load balancer (localityLbPolicies) config.

    Configures two custom LB policies on the backend service: a non-existent
    one followed by ``test.RpcBehaviorLoadBalancer``, which tells servers to
    fail RPCs with a preconfigured status code. Verifies the client skips the
    unknown policy and applies the valid one.
    """
    @classmethod
    def setUpClass(cls):
        """Force the java test server for languages not yet supporting
        the `rpc-behavior` feature.
        https://github.com/grpc/grpc/blob/master/doc/xds-test-descriptions.md#server
        """
        super().setUpClass()
        # gRPC Java implemented server "error-code-" rpc-behavior in v1.47.x.
        # gRPC CPP implemented rpc-behavior in the same version, as custom_lb.
        if cls.lang_spec.client_lang in _Lang.JAVA | _Lang.CPP:
            return
        # gRPC go, python and node fallback to the gRPC Java.
        # TODO(https://github.com/grpc/grpc-go/issues/6288): use go server.
        # TODO(https://github.com/grpc/grpc/issues/33134): use python server.
        cls.server_image = xds_k8s_flags.SERVER_IMAGE_CANONICAL.value
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        # Minimum client versions with custom LB policy support per language.
        if config.client_lang == _Lang.JAVA:
            return config.version_gte("v1.47.x")
        if config.client_lang == _Lang.CPP:
            return config.version_gte("v1.55.x")
        if config.client_lang == _Lang.GO:
            return config.version_gte("v1.56.x")
        return False
    def test_custom_lb_config(self):
        """Provisions TD resources, starts server and client, and asserts
        RPCs fail with the status injected via the rpc-behavior LB data."""
        with self.subTest("0_create_health_check"):
            self.td.create_health_check()
        # Configures a custom, test LB on the client to instruct the servers
        # to always respond with a specific error code.
        #
        # The first policy in the list is a non-existent one to verify that
        # the gRPC client can gracefully move down the list to the valid one
        # once it determines the first one is not available.
        with self.subTest("1_create_backend_service"):
            self.td.create_backend_service(
                locality_lb_policies=[
                    {
                        "customPolicy": {
                            "name": "test.ThisLoadBalancerDoesNotExist",
                            "data": '{ "foo": "bar" }',
                        },
                    },
                    {
                        "customPolicy": {
                            "name": "test.RpcBehaviorLoadBalancer",
                            "data": (
                                '{ "rpcBehavior":'
                                f' "error-code-{_EXPECTED_STATUS.value[0]}" }}'
                            ),
                        }
                    },
                ]
            )
        with self.subTest("2_create_url_map"):
            self.td.create_url_map(self.server_xds_host, self.server_xds_port)
        with self.subTest("3_create_target_proxy"):
            self.td.create_target_proxy()
        with self.subTest("4_create_forwarding_rule"):
            self.td.create_forwarding_rule(self.server_xds_port)
        with self.subTest("5_start_test_server"):
            test_server: _XdsTestServer = self.startTestServers()[0]
        with self.subTest("6_add_server_backends_to_backend_service"):
            self.setupServerBackends()
        with self.subTest("7_start_test_client"):
            test_client: _XdsTestClient = self.startTestClient(test_server)
        with self.subTest("8_test_client_xds_config_exists"):
            self.assertXdsConfigExists(test_client)
        # Verify status codes from the servers have the configured one.
        with self.subTest("9_test_server_returned_configured_status_code"):
            self.assertRpcStatusCodes(
                test_client,
                expected_status=_EXPECTED_STATUS,
                duration=datetime.timedelta(seconds=10),
                method="UNARY_CALL",
            )
# Standalone entry point: run under absltest, aborting on the first failure.
if __name__ == "__main__":
    absltest.main(failfast=True)
| 4,835
| 36.78125
| 84
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/failover_test.py
|
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List
from absl import flags
from absl.testing import absltest
from framework import xds_k8s_flags
from framework import xds_k8s_testcase
from framework.helpers import skips
from framework.infrastructure import k8s
from framework.test_app.runners.k8s import k8s_xds_server_runner
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_k8s_testcase)
# Type aliases
_Lang = skips.Lang
_XdsTestServer = xds_k8s_testcase.XdsTestServer
_XdsTestClient = xds_k8s_testcase.XdsTestClient
_KubernetesServerRunner = k8s_xds_server_runner.KubernetesServerRunner
class FailoverTest(xds_k8s_testcase.RegularXdsKubernetesTestCase):
    """Tests xDS failover from a primary to a secondary locality.

    Runs a primary server deployment plus an alternate deployment in a
    secondary cluster, then marks primary replicas not-serving one by one
    to verify traffic gradually fails over and later resumes on recovery.
    """
    # Number of replicas in the primary (default) server deployment.
    REPLICA_COUNT = 3
    # Per-endpoint rate cap applied to both localities' backends.
    MAX_RATE_PER_ENDPOINT = 100
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Force the python client to use the reference server image (Java)
        # because the python server doesn't yet support set_not_serving RPC.
        # TODO(https://github.com/grpc/grpc/issues/30635): Remove when resolved.
        if cls.lang_spec.client_lang == _Lang.PYTHON:
            cls.server_image = xds_k8s_flags.SERVER_IMAGE_CANONICAL.value
    def setUp(self):
        """Creates the alternate server runner in the secondary cluster."""
        super().setUp()
        self.secondary_server_runner = _KubernetesServerRunner(
            k8s.KubernetesNamespace(
                self.secondary_k8s_api_manager, self.server_namespace
            ),
            # "-alt" suffix distinguishes it from the primary deployment.
            deployment_name=self.server_name + "-alt",
            image_name=self.server_image,
            gcp_service_account=self.gcp_service_account,
            td_bootstrap_image=self.td_bootstrap_image,
            gcp_project=self.project,
            gcp_api_manager=self.gcp_api_manager,
            xds_server_uri=self.xds_server_uri,
            network=self.network,
            debug_use_port_forwarding=self.debug_use_port_forwarding,
            # This runner's namespace created in the secondary cluster,
            # so it's not reused and must be cleaned up.
            reuse_namespace=False,
        )
    def cleanup(self):
        """Extends base cleanup with the secondary cluster's server runner."""
        super().cleanup()
        # setUp may have failed before the runner was created; guard for it.
        if hasattr(self, "secondary_server_runner"):
            self.secondary_server_runner.cleanup(
                force=self.force_cleanup, force_namespace=self.force_cleanup
            )
    def test_failover(self) -> None:
        """Drives the failover scenario end to end; steps are numbered so the
        subTest output reads in execution order."""
        with self.subTest("00_create_health_check"):
            self.td.create_health_check()
        with self.subTest("01_create_backend_services"):
            self.td.create_backend_service()
        with self.subTest("02_create_url_map"):
            self.td.create_url_map(self.server_xds_host, self.server_xds_port)
        with self.subTest("03_create_target_proxy"):
            self.td.create_target_proxy()
        with self.subTest("04_create_forwarding_rule"):
            self.td.create_forwarding_rule(self.server_xds_port)
        default_test_servers: List[_XdsTestServer]
        alternate_test_servers: List[_XdsTestServer]
        with self.subTest("05_start_test_servers"):
            default_test_servers = self.startTestServers(
                replica_count=self.REPLICA_COUNT
            )
            alternate_test_servers = self.startTestServers(
                server_runner=self.secondary_server_runner
            )
        with self.subTest("06_add_server_backends_to_backend_services"):
            self.setupServerBackends(
                max_rate_per_endpoint=self.MAX_RATE_PER_ENDPOINT
            )
            self.setupServerBackends(
                server_runner=self.secondary_server_runner,
                max_rate_per_endpoint=self.MAX_RATE_PER_ENDPOINT,
            )
        test_client: _XdsTestClient
        with self.subTest("07_start_test_client"):
            test_client = self.startTestClient(default_test_servers[0])
        with self.subTest("08_test_client_xds_config_exists"):
            self.assertXdsConfigExists(test_client)
        with self.subTest("09_primary_locality_receives_requests"):
            self.assertRpcsEventuallyGoToGivenServers(
                test_client, default_test_servers
            )
        with self.subTest(
            "10_secondary_locality_receives_no_requests_on_partial_primary_failure"
        ):
            # One of three replicas down: remaining primaries absorb traffic.
            default_test_servers[0].set_not_serving()
            self.assertRpcsEventuallyGoToGivenServers(
                test_client, default_test_servers[1:]
            )
        with self.subTest("11_gentle_failover"):
            # Two of three down: traffic spills over to the secondary locality.
            default_test_servers[1].set_not_serving()
            self.assertRpcsEventuallyGoToGivenServers(
                test_client, default_test_servers[2:] + alternate_test_servers
            )
        with self.subTest(
            "12_secondary_locality_receives_requests_on_primary_failure"
        ):
            # Entire primary locality down: secondary takes everything.
            default_test_servers[2].set_not_serving()
            self.assertRpcsEventuallyGoToGivenServers(
                test_client, alternate_test_servers
            )
        with self.subTest("13_traffic_resumes_to_healthy_backends"):
            for i in range(self.REPLICA_COUNT):
                default_test_servers[i].set_serving()
            self.assertRpcsEventuallyGoToGivenServers(
                test_client, default_test_servers
            )
# Standalone entry point: run under absltest, aborting on the first failure.
if __name__ == "__main__":
    absltest.main(failfast=True)
| 5,855
| 36.538462
| 83
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/bootstrap_generator_test.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
from typing import List
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from framework import bootstrap_generator_testcase
from framework import xds_k8s_testcase
from framework.helpers import retryers
from framework.test_app.runners.k8s import k8s_xds_client_runner
from framework.test_app.runners.k8s import k8s_xds_server_runner
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_k8s_testcase)
# Type aliases
XdsTestServer = xds_k8s_testcase.XdsTestServer
XdsTestClient = xds_k8s_testcase.XdsTestClient
KubernetesServerRunner = k8s_xds_server_runner.KubernetesServerRunner
KubernetesClientRunner = k8s_xds_client_runner.KubernetesClientRunner
_timedelta = datetime.timedelta
# Returns a list of bootstrap generator versions to be tested along with their
# image names.
#
# Whenever we release a new version of the bootstrap generator, we need to add a
# corresponding entry here.
#
# TODO: Update bootstrap generator release instructions to add an entry here,
# after the release is published.
def bootstrap_version_testcases() -> List:
    """Returns the bootstrap generator versions to test with their images.

    Whenever a new version of the bootstrap generator is released, a
    corresponding entry must be added here.

    Returns:
        A list of dicts, each carrying the generator release ``version`` and
        the fully-qualified docker ``image`` to run.
    """
    # Return an actual list to match the annotated `-> List` return type
    # (previously a tuple); callers only iterate, so this is compatible.
    return [
        dict(
            version="v0.14.0",
            image="gcr.io/grpc-testing/td-grpc-bootstrap:d6baaf7b0e0c63054ac4d9bedc09021ff261d599",
        ),
        dict(
            version="v0.13.0",
            image="gcr.io/grpc-testing/td-grpc-bootstrap:203db6ce70452996f4183c30dd4c5ecaada168b0",
        ),
        dict(
            version="v0.12.0",
            image="gcr.io/grpc-testing/td-grpc-bootstrap:8765051ef3b742bc5cd20f16de078ae7547f2ba2",
        ),
        dict(
            version="v0.11.0",
            image="gcr.io/grpc-testing/td-grpc-bootstrap:b96f7a73314668aee83cbf86ab1e40135a0542fc",
        ),
        # v0.10.0 uses v2 xDS transport protocol by default. TD only supports v3
        # and we can force the bootstrap generator to emit config with v3
        # support by setting the --include-v3-features-experimental flag to
        # true.
        #
        # TODO: Figure out how to pass flags to the bootstrap generator via the
        # client and server runners, and uncomment this version.
        # ('v0.10.0', 'gcr.io/grpc-testing/td-grpc-bootstrap:66de7ea0e170351c9fae17232b81adbfb3e80ec3'),
    ]
# TODO: Reuse service account and namespaces for significant improvements in
# running time.
class BootstrapGeneratorClientTest(
    bootstrap_generator_testcase.BootstrapGeneratorBaseTest,
    parameterized.TestCase,
):
    """Runs the baseline test with the client using each released version of
    the td-grpc-bootstrap generator; a single shared server is reused."""
    # Class-level fixtures; server_runner/test_server are shared across all
    # parameterized client runs, client_runner is created per test.
    client_runner: KubernetesClientRunner
    server_runner: KubernetesServerRunner
    test_server: XdsTestServer
    @classmethod
    def setUpClass(cls):
        """Hook method for setting up class fixture before running tests in
        the class.
        """
        super().setUpClass()
        # For client tests, we use a single server instance that can be shared
        # across all the parameterized clients. And this server runner will use
        # the version of the bootstrap generator as configured via the
        # --td_bootstrap_image flag.
        cls.server_runner = cls.initKubernetesServerRunner()
        cls.test_server = cls.startTestServer(
            server_runner=cls.server_runner,
            port=cls.server_port,
            maintenance_port=cls.server_maintenance_port,
            xds_host=cls.server_xds_host,
            xds_port=cls.server_xds_port,
        )
        # Load backends.
        neg_name, neg_zones = cls.server_runner.k8s_namespace.get_service_neg(
            cls.server_runner.service_name, cls.server_port
        )
        # Add backends to the Backend Service.
        cls.td.backend_service_add_neg_backends(neg_name, neg_zones)
        cls.td.wait_for_backends_healthy_status()
    @classmethod
    def tearDownClass(cls):
        """Detaches backends and shuts down the shared server runner."""
        # Remove backends from the Backend Service before closing the server
        # runner.
        neg_name, neg_zones = cls.server_runner.k8s_namespace.get_service_neg(
            cls.server_runner.service_name, cls.server_port
        )
        cls.td.backend_service_remove_neg_backends(neg_name, neg_zones)
        cls.server_runner.cleanup(force=cls.force_cleanup)
        super().tearDownClass()
    def tearDown(self):
        """Cleans up the per-test client runner, retrying transient errors."""
        logger.info("----- TestMethod %s teardown -----", self.id())
        retryer = retryers.constant_retryer(
            wait_fixed=_timedelta(seconds=10),
            attempts=3,
            log_level=logging.INFO,
        )
        try:
            retryer(self._cleanup)
        except retryers.RetryError:
            # Teardown failures shouldn't mask the test result; just log.
            logger.exception("Got error during teardown")
        super().tearDown()
    def _cleanup(self):
        """Single cleanup attempt wrapped by the retryer in tearDown."""
        self.client_runner.cleanup(force=self.force_cleanup)
    @parameterized.parameters(
        (t["version"], t["image"]) for t in bootstrap_version_testcases()
    )
    def test_baseline_in_client_with_bootstrap_version(self, version, image):
        """Runs the baseline test for multiple versions of the bootstrap
        generator on the client.
        """
        logger.info(
            "----- testing bootstrap generator version %s -----", version
        )
        self.client_runner = self.initKubernetesClientRunner(
            td_bootstrap_image=image
        )
        test_client: XdsTestClient = self.startTestClient(self.test_server)
        self.assertXdsConfigExists(test_client)
        self.assertSuccessfulRpcs(test_client)
# TODO: Use unique client and server deployment names while creating the
# corresponding runners, by suffixing the version of the bootstrap generator
# being tested. Then, run these in parallel.
class BootstrapGeneratorServerTest(
    bootstrap_generator_testcase.BootstrapGeneratorBaseTest,
    parameterized.TestCase,
):
    """Runs the baseline test with the server using each released version of
    the td-grpc-bootstrap generator; server and client are recreated per
    parameterized case."""
    # Per-test fixtures, created inside each test method.
    client_runner: KubernetesClientRunner
    server_runner: KubernetesServerRunner
    test_server: XdsTestServer
    def tearDown(self):
        """Cleans up per-test client, backends, and server, with retries."""
        logger.info("----- TestMethod %s teardown -----", self.id())
        retryer = retryers.constant_retryer(
            wait_fixed=_timedelta(seconds=10),
            attempts=3,
            log_level=logging.INFO,
        )
        try:
            retryer(self._cleanup)
        except retryers.RetryError:
            # Teardown failures shouldn't mask the test result; just log.
            logger.exception("Got error during teardown")
        super().tearDown()
    def _cleanup(self):
        """Single cleanup attempt wrapped by the retryer in tearDown."""
        self.client_runner.cleanup(force=self.force_cleanup)
        self.removeServerBackends()
        self.server_runner.cleanup(force=self.force_cleanup)
    @parameterized.parameters(
        (t["version"], t["image"]) for t in bootstrap_version_testcases()
    )
    def test_baseline_in_server_with_bootstrap_version(self, version, image):
        """Runs the baseline test for multiple versions of the bootstrap
        generator on the server.
        """
        logger.info(
            "----- Testing bootstrap generator version %s -----", version
        )
        self.server_runner = self.initKubernetesServerRunner(
            td_bootstrap_image=image
        )
        self.test_server = self.startTestServer(
            server_runner=self.server_runner,
            port=self.server_port,
            maintenance_port=self.server_maintenance_port,
            xds_host=self.server_xds_host,
            xds_port=self.server_xds_port,
        )
        # Load backends.
        neg_name, neg_zones = self.server_runner.k8s_namespace.get_service_neg(
            self.server_runner.service_name, self.server_port
        )
        # Add backends to the Backend Service.
        self.td.backend_service_add_neg_backends(neg_name, neg_zones)
        self.td.wait_for_backends_healthy_status()
        self.client_runner = self.initKubernetesClientRunner()
        test_client: XdsTestClient = self.startTestClient(self.test_server)
        self.assertXdsConfigExists(test_client)
        self.assertSuccessfulRpcs(test_client)
# Standalone entry point: run all tests in this module under absltest.
if __name__ == "__main__":
    absltest.main()
| 8,479
| 36.356828
| 104
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/url_map/__main__.py
|
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from absl.testing import absltest
from framework import xds_url_map_testcase # Needed for xDS flags
_TEST_CASE_FOLDER = os.path.dirname(__file__)
def load_tests(loader: absltest.TestLoader, unused_tests, unused_pattern):
    """Implements the unittest ``load_tests`` protocol: discovers every
    url-map test case module in this folder by its well-known file suffix."""
    test_file_pattern = "*" + xds_url_map_testcase.URL_MAP_TESTCASE_FILE_SUFFIX
    return loader.discover(_TEST_CASE_FOLDER, pattern=test_file_pattern)
if __name__ == "__main__":
absltest.main()
| 1,001
| 29.363636
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/url_map/metadata_filter_test.py
|
# Copyright 2022 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Tuple
from absl import flags
from absl.testing import absltest
from framework import xds_url_map_testcase
from framework.test_app import client_app
# Type aliases
HostRule = xds_url_map_testcase.HostRule
PathMatcher = xds_url_map_testcase.PathMatcher
GcpResourceManager = xds_url_map_testcase.GcpResourceManager
DumpedXdsConfig = xds_url_map_testcase.DumpedXdsConfig
RpcTypeUnaryCall = xds_url_map_testcase.RpcTypeUnaryCall
RpcTypeEmptyCall = xds_url_map_testcase.RpcTypeEmptyCall
XdsTestClient = client_app.XdsTestClient
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_url_map_testcase)
_NUM_RPCS = 150
_TEST_METADATA_KEY = "xds_md"
_TEST_METADATA_VALUE_EMPTY = "empty_ytpme"
_TEST_METADATA = (
(RpcTypeEmptyCall, _TEST_METADATA_KEY, _TEST_METADATA_VALUE_EMPTY),
)
match_labels = [
{"name": "TRAFFICDIRECTOR_NETWORK_NAME", "value": "default-vpc"}
]
not_match_labels = [{"name": "fake", "value": "fail"}]
class TestMetadataFilterMatchAll(xds_url_map_testcase.XdsUrlMapTestCase):
    """The test url-map has two routeRules: the higher priority routes to
    the default backends, but is supposed to be filtered out by TD because
    of non-matching metadata filters. The lower priority routes to alternative
    backends and metadata filter matches. Thus, it verifies that TD evaluates
    metadata filters correctly."""
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            # Priority 0: MATCH_ALL over labels that don't match, so TD must
            # drop this rule before pushing config to the client.
            {
                "priority": 0,
                "matchRules": [
                    {
                        "prefixMatch": "/",
                        "metadataFilters": [
                            {
                                "filterMatchCriteria": "MATCH_ALL",
                                "filterLabels": not_match_labels,
                            }
                        ],
                    }
                ],
                "service": GcpResourceManager().default_backend_service(),
            },
            # Priority 1: matching labels; routes EmptyCall (with the test
            # header) to the alternative backend service.
            {
                "priority": 1,
                "matchRules": [
                    {
                        "prefixMatch": "/grpc.testing.TestService/Empty",
                        "headerMatches": [
                            {
                                "headerName": _TEST_METADATA_KEY,
                                "exactMatch": _TEST_METADATA_VALUE_EMPTY,
                            }
                        ],
                        "metadataFilters": [
                            {
                                "filterMatchCriteria": "MATCH_ALL",
                                "filterLabels": match_labels,
                            }
                        ],
                    }
                ],
                "service": GcpResourceManager().alternative_backend_service(),
            },
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        """Asserts the filtered-out rule is absent: route 0 is the
        metadata-matching rule, route 1 is the default catch-all."""
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(len(xds_config.rds["virtualHosts"][0]["routes"]), 2)
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["prefix"],
            "/grpc.testing.TestService/Empty",
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["name"],
            _TEST_METADATA_KEY,
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["exactMatch"],
            _TEST_METADATA_VALUE_EMPTY,
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][1]["match"]["prefix"],
            "",
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        """Asserts all EmptyCalls landed on the alternative service."""
        rpc_distribution = self.configure_and_send(
            test_client,
            rpc_types=[RpcTypeEmptyCall],
            metadata=_TEST_METADATA,
            num_rpcs=_NUM_RPCS,
        )
        self.assertEqual(
            _NUM_RPCS, rpc_distribution.empty_call_alternative_service_rpc_count
        )
class TestMetadataFilterMatchAny(xds_url_map_testcase.XdsUrlMapTestCase):
    """Verifies MATCH_ANY metadata filter evaluation: a rule whose filter
    labels contain no matching label is dropped by TD (priority 0), while a
    rule with at least one matching label is kept (priority 1)."""
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            # Priority 0: MATCH_ANY over only non-matching labels -> dropped.
            {
                "priority": 0,
                "matchRules": [
                    {
                        "prefixMatch": "/",
                        "metadataFilters": [
                            {
                                "filterMatchCriteria": "MATCH_ANY",
                                "filterLabels": not_match_labels,
                            }
                        ],
                    }
                ],
                "service": GcpResourceManager().default_backend_service(),
            },
            # Priority 1: MATCH_ANY over mixed labels (one matches) -> kept.
            {
                "priority": 1,
                "matchRules": [
                    {
                        "prefixMatch": "/grpc.testing.TestService/Unary",
                        "metadataFilters": [
                            {
                                "filterMatchCriteria": "MATCH_ANY",
                                "filterLabels": not_match_labels + match_labels,
                            }
                        ],
                    }
                ],
                "service": GcpResourceManager().alternative_backend_service(),
            },
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        """Asserts only the kept rule and the default catch-all remain."""
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(len(xds_config.rds["virtualHosts"][0]["routes"]), 2)
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["prefix"],
            "/grpc.testing.TestService/Unary",
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][1]["match"]["prefix"],
            "",
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        """Asserts all UnaryCalls landed on the alternative service."""
        rpc_distribution = self.configure_and_send(
            test_client, rpc_types=(RpcTypeUnaryCall,), num_rpcs=_NUM_RPCS
        )
        self.assertEqual(
            _NUM_RPCS, rpc_distribution.unary_call_alternative_service_rpc_count
        )
class TestMetadataFilterMatchAnyAndAll(xds_url_map_testcase.XdsUrlMapTestCase):
    """Verifies MATCH_ALL vs MATCH_ANY over the same mixed label set: with
    one non-matching label present, MATCH_ALL drops the rule (priority 0)
    while MATCH_ANY keeps it (priority 1)."""
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            # Priority 0: MATCH_ALL over mixed labels -> not all match ->
            # dropped by TD.
            {
                "priority": 0,
                "matchRules": [
                    {
                        "prefixMatch": "/",
                        "metadataFilters": [
                            {
                                "filterMatchCriteria": "MATCH_ALL",
                                "filterLabels": not_match_labels + match_labels,
                            }
                        ],
                    }
                ],
                "service": GcpResourceManager().default_backend_service(),
            },
            # Priority 1: MATCH_ANY over the same labels -> one matches ->
            # kept.
            {
                "priority": 1,
                "matchRules": [
                    {
                        "prefixMatch": "/grpc.testing.TestService/Unary",
                        "metadataFilters": [
                            {
                                "filterMatchCriteria": "MATCH_ANY",
                                "filterLabels": not_match_labels + match_labels,
                            }
                        ],
                    }
                ],
                "service": GcpResourceManager().alternative_backend_service(),
            },
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        """Asserts only the MATCH_ANY rule and the default catch-all remain."""
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(len(xds_config.rds["virtualHosts"][0]["routes"]), 2)
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["prefix"],
            "/grpc.testing.TestService/Unary",
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][1]["match"]["prefix"],
            "",
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        """Asserts all UnaryCalls landed on the alternative service."""
        rpc_distribution = self.configure_and_send(
            test_client, rpc_types=(RpcTypeUnaryCall,), num_rpcs=_NUM_RPCS
        )
        self.assertEqual(
            _NUM_RPCS, rpc_distribution.unary_call_alternative_service_rpc_count
        )
class TestMetadataFilterMatchMultipleRules(
    xds_url_map_testcase.XdsUrlMapTestCase
):
    """Verifies that multiple routeRules with matching metadata filters are
    all kept and ordered by priority; RPCs carrying the test header follow
    the higher-priority rule to the alternative service."""
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            # Priority 0: header match + MATCH_ANY matching labels -> kept,
            # routes to the alternative service.
            {
                "priority": 0,
                "matchRules": [
                    {
                        "prefixMatch": "/",
                        "headerMatches": [
                            {
                                "headerName": _TEST_METADATA_KEY,
                                "exactMatch": _TEST_METADATA_VALUE_EMPTY,
                            }
                        ],
                        "metadataFilters": [
                            {
                                "filterMatchCriteria": "MATCH_ANY",
                                "filterLabels": match_labels,
                            }
                        ],
                    }
                ],
                "service": GcpResourceManager().alternative_backend_service(),
            },
            # Priority 1: MATCH_ALL matching labels -> also kept, routes to
            # the default service.
            {
                "priority": 1,
                "matchRules": [
                    {
                        "prefixMatch": "/grpc.testing.TestService/Unary",
                        "metadataFilters": [
                            {
                                "filterMatchCriteria": "MATCH_ALL",
                                "filterLabels": match_labels,
                            }
                        ],
                    }
                ],
                "service": GcpResourceManager().default_backend_service(),
            },
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        """Asserts both configured rules plus the default catch-all are
        present, in priority order."""
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(len(xds_config.rds["virtualHosts"][0]["routes"]), 3)
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["name"],
            _TEST_METADATA_KEY,
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["exactMatch"],
            _TEST_METADATA_VALUE_EMPTY,
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][1]["match"]["prefix"],
            "/grpc.testing.TestService/Unary",
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][2]["match"]["prefix"],
            "",
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        """Asserts all EmptyCalls landed on the alternative service."""
        rpc_distribution = self.configure_and_send(
            test_client,
            rpc_types=[RpcTypeEmptyCall],
            metadata=_TEST_METADATA,
            num_rpcs=_NUM_RPCS,
        )
        self.assertEqual(
            _NUM_RPCS, rpc_distribution.empty_call_alternative_service_rpc_count
        )
# Standalone entry point: run all tests in this module under absltest.
if __name__ == "__main__":
    absltest.main()
| 12,333
| 35.383481
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/url_map/header_matching_test.py
|
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Tuple
from absl import flags
from absl.testing import absltest
from framework import xds_url_map_testcase
from framework.helpers import skips
from framework.test_app import client_app
# Type aliases
HostRule = xds_url_map_testcase.HostRule
PathMatcher = xds_url_map_testcase.PathMatcher
GcpResourceManager = xds_url_map_testcase.GcpResourceManager
DumpedXdsConfig = xds_url_map_testcase.DumpedXdsConfig
RpcTypeUnaryCall = xds_url_map_testcase.RpcTypeUnaryCall
RpcTypeEmptyCall = xds_url_map_testcase.RpcTypeEmptyCall
XdsTestClient = client_app.XdsTestClient
_Lang = skips.Lang
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_url_map_testcase)
_NUM_RPCS = 150
_TEST_METADATA_KEY = "xds_md"
_TEST_METADATA_VALUE_UNARY = "unary_yranu"
_TEST_METADATA_VALUE_EMPTY = "empty_ytpme"
_TEST_METADATA_NUMERIC_KEY = "xds_md_numeric"
_TEST_METADATA_NUMERIC_VALUE = "159"
_TEST_METADATA = (
(RpcTypeUnaryCall, _TEST_METADATA_KEY, _TEST_METADATA_VALUE_UNARY),
(RpcTypeEmptyCall, _TEST_METADATA_KEY, _TEST_METADATA_VALUE_EMPTY),
(
RpcTypeUnaryCall,
_TEST_METADATA_NUMERIC_KEY,
_TEST_METADATA_NUMERIC_VALUE,
),
)
def _is_supported(config: skips.TestConfig) -> bool:
    """Shared support gate for this module: every language is supported,
    except Node which requires at least v1.3.x."""
    needs_version_check = config.client_lang == _Lang.NODE
    return config.version_gte("v1.3.x") if needs_version_check else True
class TestExactMatch(xds_url_map_testcase.XdsUrlMapTestCase):
    """Routes on a header exactMatch: EmptyCalls carrying the exact test
    metadata value go to the alternative backend service."""
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            {
                "priority": 0,
                # Header ExactMatch -> alternate_backend_service.
                # EmptyCall is sent with the metadata.
                "matchRules": [
                    {
                        "prefixMatch": "/",
                        "headerMatches": [
                            {
                                "headerName": _TEST_METADATA_KEY,
                                "exactMatch": _TEST_METADATA_VALUE_EMPTY,
                            }
                        ],
                    }
                ],
                "service": GcpResourceManager().alternative_backend_service(),
            }
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        """Asserts RDS carries the exactMatch header matcher as configured."""
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["name"],
            _TEST_METADATA_KEY,
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["exactMatch"],
            _TEST_METADATA_VALUE_EMPTY,
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        """Asserts all EmptyCalls landed on the alternative service."""
        rpc_distribution = self.configure_and_send(
            test_client,
            rpc_types=[RpcTypeEmptyCall],
            metadata=_TEST_METADATA,
            num_rpcs=_NUM_RPCS,
        )
        self.assertEqual(
            _NUM_RPCS, rpc_distribution.empty_call_alternative_service_rpc_count
        )
@absltest.skip("the xDS config is good, but distribution is wrong.")
class TestPrefixMatch(xds_url_map_testcase.XdsUrlMapTestCase):
    """Routes on a header prefixMatch: UnaryCalls whose test metadata value
    starts with the configured prefix go to the alternative backend service.
    Currently skipped (see the decorator)."""
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            {
                "priority": 0,
                # Header PrefixMatch -> alternate_backend_service.
                # UnaryCall is sent with the metadata.
                "matchRules": [
                    {
                        "prefixMatch": "/",
                        "headerMatches": [
                            {
                                "headerName": _TEST_METADATA_KEY,
                                # First two chars of the metadata value.
                                "prefixMatch": _TEST_METADATA_VALUE_UNARY[:2],
                            }
                        ],
                    }
                ],
                "service": GcpResourceManager().alternative_backend_service(),
            }
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        """Asserts RDS carries the prefixMatch header matcher as configured."""
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["name"],
            _TEST_METADATA_KEY,
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["prefixMatch"],
            _TEST_METADATA_VALUE_UNARY[:2],
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        """Asserts all UnaryCalls landed on the alternative service."""
        rpc_distribution = self.configure_and_send(
            test_client,
            rpc_types=(RpcTypeUnaryCall,),
            metadata=_TEST_METADATA,
            num_rpcs=_NUM_RPCS,
        )
        self.assertEqual(
            _NUM_RPCS, rpc_distribution.unary_call_alternative_service_rpc_count
        )
class TestSuffixMatch(xds_url_map_testcase.XdsUrlMapTestCase):
    """Routes on a header suffixMatch: EmptyCalls whose test metadata value
    ends with the configured suffix go to the alternative backend service."""
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            {
                "priority": 0,
                # Header SuffixMatch -> alternate_backend_service.
                # EmptyCall is sent with the metadata.
                "matchRules": [
                    {
                        "prefixMatch": "/",
                        "headerMatches": [
                            {
                                "headerName": _TEST_METADATA_KEY,
                                # Last two chars of the metadata value.
                                "suffixMatch": _TEST_METADATA_VALUE_EMPTY[-2:],
                            }
                        ],
                    }
                ],
                "service": GcpResourceManager().alternative_backend_service(),
            }
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        """Asserts RDS carries the suffixMatch header matcher as configured."""
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["name"],
            _TEST_METADATA_KEY,
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["suffixMatch"],
            _TEST_METADATA_VALUE_EMPTY[-2:],
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        """Asserts all EmptyCalls landed on the alternative service."""
        rpc_distribution = self.configure_and_send(
            test_client,
            rpc_types=[RpcTypeEmptyCall],
            metadata=_TEST_METADATA,
            num_rpcs=_NUM_RPCS,
        )
        self.assertEqual(
            _NUM_RPCS, rpc_distribution.empty_call_alternative_service_rpc_count
        )
class TestPresentMatch(xds_url_map_testcase.XdsUrlMapTestCase):
    """Routes on header presence: UnaryCalls carrying the numeric test header
    (regardless of value) go to the alternative backend service."""
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            {
                "priority": 0,
                # Header 'xds_md_numeric' present -> alternate_backend_service.
                # UnaryCall is sent with the metadata, so will be sent to alternative.
                "matchRules": [
                    {
                        "prefixMatch": "/",
                        "headerMatches": [
                            {
                                "headerName": _TEST_METADATA_NUMERIC_KEY,
                                "presentMatch": True,
                            }
                        ],
                    }
                ],
                "service": GcpResourceManager().alternative_backend_service(),
            }
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        """Asserts RDS carries the presentMatch header matcher as configured."""
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["name"],
            _TEST_METADATA_NUMERIC_KEY,
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["presentMatch"],
            True,
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        """Asserts all UnaryCalls landed on the alternative service."""
        rpc_distribution = self.configure_and_send(
            test_client,
            rpc_types=(RpcTypeUnaryCall,),
            metadata=_TEST_METADATA,
            num_rpcs=_NUM_RPCS,
        )
        self.assertEqual(
            _NUM_RPCS, rpc_distribution.unary_call_alternative_service_rpc_count
        )
class TestInvertMatch(xds_url_map_testcase.XdsUrlMapTestCase):
    """Inverted exact-match on the test metadata header.

    RPCs whose header value is NOT the UnaryCall value route to the
    alternative backend; UnaryCall (which matches exactly) stays on default.
    """
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            {
                "priority": 0,
                # Header invert ExactMatch -> alternate_backend_service.
                # UnaryCall is sent with the metadata, so will be sent to
                # default. EmptyCall will be sent to alternative.
                "matchRules": [
                    {
                        "prefixMatch": "/",
                        "headerMatches": [
                            {
                                "headerName": _TEST_METADATA_KEY,
                                "exactMatch": _TEST_METADATA_VALUE_UNARY,
                                "invertMatch": True,
                            }
                        ],
                    }
                ],
                "service": GcpResourceManager().alternative_backend_service(),
            }
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        # The dumped RDS config must carry the matcher name and the
        # invertMatch flag unchanged.
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["name"],
            _TEST_METADATA_KEY,
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["invertMatch"],
            True,
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        # All RPCs succeed; UnaryCall must avoid the alternative service and
        # EmptyCall must avoid the default service.
        rpc_distribution = self.configure_and_send(
            test_client,
            rpc_types=[RpcTypeUnaryCall, RpcTypeEmptyCall],
            metadata=_TEST_METADATA,
            num_rpcs=_NUM_RPCS,
        )
        self.assertEqual(_NUM_RPCS, rpc_distribution.num_oks)
        self.assertEqual(
            0, rpc_distribution.unary_call_alternative_service_rpc_count
        )
        self.assertEqual(
            0, rpc_distribution.empty_call_default_service_rpc_count
        )
class TestRangeMatch(xds_url_map_testcase.XdsUrlMapTestCase):
    """Numeric-range match on the 'xds_md_numeric' header.

    Values in [100, 200) route to the alternative backend; UnaryCall carries
    an in-range value, EmptyCall does not.
    """
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            {
                "priority": 0,
                # Header 'xds_md_numeric' range [100,200] -> alternate_backend_service.
                # UnaryCall is sent with the metadata in range.
                "matchRules": [
                    {
                        "prefixMatch": "/",
                        "headerMatches": [
                            {
                                "headerName": _TEST_METADATA_NUMERIC_KEY,
                                # GCP API takes the bounds as strings.
                                "rangeMatch": {
                                    "rangeStart": "100",
                                    "rangeEnd": "200",
                                },
                            }
                        ],
                    }
                ],
                "service": GcpResourceManager().alternative_backend_service(),
            }
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        # In the xDS dump the GCP rangeStart/rangeEnd appear as the
        # rangeMatch "start"/"end" fields.
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["name"],
            _TEST_METADATA_NUMERIC_KEY,
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["rangeMatch"]["start"],
            "100",
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["rangeMatch"]["end"],
            "200",
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        # All RPCs succeed; the in-range UnaryCall must leave the default
        # service, the out-of-range EmptyCall must avoid the alternative.
        rpc_distribution = self.configure_and_send(
            test_client,
            rpc_types=[RpcTypeUnaryCall, RpcTypeEmptyCall],
            metadata=_TEST_METADATA,
            num_rpcs=_NUM_RPCS,
        )
        self.assertEqual(_NUM_RPCS, rpc_distribution.num_oks)
        self.assertEqual(
            0, rpc_distribution.unary_call_default_service_rpc_count
        )
        self.assertEqual(
            0, rpc_distribution.empty_call_alternative_service_rpc_count
        )
class TestRegexMatch(xds_url_map_testcase.XdsUrlMapTestCase):
    """Regex match on the test metadata header routes EmptyCall to the
    alternative backend service."""

    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)

    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        # Bug fix: routeRules was previously assigned "([...],)" — a
        # one-element tuple wrapping the rule list — instead of the list
        # itself, unlike every other test case in this file.
        path_matcher["routeRules"] = [
            {
                "priority": 0,
                # Header RegexMatch -> alternate_backend_service.
                # EmptyCall is sent with the metadata.
                "matchRules": [
                    {
                        "prefixMatch": "/",
                        "headerMatches": [
                            {
                                "headerName": _TEST_METADATA_KEY,
                                # Anchored pattern built from the first and
                                # last two chars of the EmptyCall value.
                                "regexMatch": "^%s.*%s$"
                                % (
                                    _TEST_METADATA_VALUE_EMPTY[:2],
                                    _TEST_METADATA_VALUE_EMPTY[-2:],
                                ),
                            }
                        ],
                    }
                ],
                "service": GcpResourceManager().alternative_backend_service(),
            }
        ]
        return host_rule, path_matcher

    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        """Checks the GCP regexMatch surfaces as the xDS safeRegexMatch."""
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["name"],
            _TEST_METADATA_KEY,
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["headers"][
                0
            ]["safeRegexMatch"]["regex"],
            "^%s.*%s$"
            % (_TEST_METADATA_VALUE_EMPTY[:2], _TEST_METADATA_VALUE_EMPTY[-2:]),
        )

    def rpc_distribution_validate(self, test_client: XdsTestClient):
        """Asserts every EmptyCall hit the alternative backend service."""
        rpc_distribution = self.configure_and_send(
            test_client,
            rpc_types=[RpcTypeEmptyCall],
            metadata=_TEST_METADATA,
            num_rpcs=_NUM_RPCS,
        )
        self.assertEqual(
            _NUM_RPCS, rpc_distribution.empty_call_alternative_service_rpc_count
        )
if __name__ == "__main__":
absltest.main()
| 17,008
| 33.783231
| 87
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/url_map/fault_injection_test.py
|
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from typing import Tuple
from absl import flags
from absl.testing import absltest
import grpc
from framework import xds_url_map_testcase
from framework.helpers import skips
from framework.test_app import client_app
# Type aliases
HostRule = xds_url_map_testcase.HostRule
PathMatcher = xds_url_map_testcase.PathMatcher
GcpResourceManager = xds_url_map_testcase.GcpResourceManager
DumpedXdsConfig = xds_url_map_testcase.DumpedXdsConfig
RpcTypeUnaryCall = xds_url_map_testcase.RpcTypeUnaryCall
RpcTypeEmptyCall = xds_url_map_testcase.RpcTypeEmptyCall
XdsTestClient = client_app.XdsTestClient
ExpectedResult = xds_url_map_testcase.ExpectedResult
_Lang = skips.Lang
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_url_map_testcase)
# The first batch of RPCs don't count towards the result of test case. They are
# meant to prove the communication between driver and client is fine.
_NUM_RPCS = 10
_LENGTH_OF_RPC_SENDING_SEC = 16
# We are using sleep to synchronize test driver and the client... Even though
# the client is sending at QPS rate, we can't assert that exactly QPS *
# SLEEP_DURATION number of RPC is finished. The final completed RPC might be
# slightly more or less.
_NON_RANDOM_ERROR_TOLERANCE = 0.01
# For random generator involved test cases, we want to be more loose about the
# final result. Otherwise, we will need more test duration (sleep duration) and
# more accurate communication mechanism. The accurate of random number
# generation is not the intention of this test.
_ERROR_TOLERANCE = 0.2
_DELAY_CASE_APPLICATION_TIMEOUT_SEC = 1
_BACKLOG_WAIT_TIME_SEC = 20
def _build_fault_injection_route_rule(
    abort_percentage: int = 0, delay_percentage: int = 0
):
    """Builds a URL-map route rule injecting faults on UnaryCall.

    Args:
        abort_percentage: Percent of UnaryCalls aborted with HTTP status 401.
        delay_percentage: Percent of UnaryCalls delayed by a fixed 20 seconds.

    Returns:
        A route-rule dict targeting the default backend service.
    """
    fault_policy = {
        "abort": {
            "httpStatus": 401,
            "percentage": abort_percentage,
        },
        "delay": {
            "fixedDelay": {"seconds": "20"},
            "percentage": delay_percentage,
        },
    }
    return {
        "priority": 0,
        "matchRules": [
            {"fullPathMatch": "/grpc.testing.TestService/UnaryCall"}
        ],
        "service": GcpResourceManager().default_backend_service(),
        "routeAction": {"faultInjectionPolicy": fault_policy},
    }
def _wait_until_backlog_cleared(
    test_client: XdsTestClient, timeout: int = _BACKLOG_WAIT_TIME_SEC
):
    """Wait until the completed RPC is close to started RPC.
    For delay injected test cases, there might be a backlog of RPCs due to slow
    initialization of the client. E.g., if initialization took 20s and qps is
    25, then there will be a backlog of 500 RPCs. In normal test cases, this is
    fine, because RPCs will fail immediately. But for delay injected test cases,
    the RPC might linger much longer and affect the stability of test results.

    Raises:
        RuntimeError: If the backlog has not cleared within `timeout` seconds.
    """
    logger.info("Waiting for RPC backlog to clear for %d seconds", timeout)
    deadline = time.time() + timeout
    while time.time() < deadline:
        stats = test_client.get_load_balancer_accumulated_stats()
        ok = True
        for rpc_type in [RpcTypeUnaryCall, RpcTypeEmptyCall]:
            started = stats.num_rpcs_started_by_method.get(rpc_type, 0)
            # "Completed" counts both successes and failures.
            completed = stats.num_rpcs_succeeded_by_method.get(
                rpc_type, 0
            ) + stats.num_rpcs_failed_by_method.get(rpc_type, 0)
            # We consider the backlog is healthy, if the diff between started
            # RPCs and completed RPCs is less than 1.1 x QPS.
            if abs(started - completed) > xds_url_map_testcase.QPS.value * 1.1:
                logger.info(
                    "RPC backlog exist: rpc_type=%s started=%s completed=%s",
                    rpc_type,
                    started,
                    completed,
                )
                # Give in-flight RPCs one application-timeout to drain
                # before re-polling the stats.
                time.sleep(_DELAY_CASE_APPLICATION_TIMEOUT_SEC)
                ok = False
            else:
                logger.info(
                    "RPC backlog clear: rpc_type=%s started=%s completed=%s",
                    rpc_type,
                    started,
                    completed,
                )
        if ok:
            # Both backlog of both types of RPCs is clear, success, return.
            return
    raise RuntimeError("failed to clear RPC backlog in %s seconds" % timeout)
def _is_supported(config: skips.TestConfig) -> bool:
    """Fault injection needs Node >= v1.4.x; all other languages qualify."""
    if config.client_lang != _Lang.NODE:
        return True
    return config.version_gte("v1.4.x")
class TestZeroPercentFaultInjection(xds_url_map_testcase.XdsUrlMapTestCase):
    """Fault policy configured at 0%: every UnaryCall must still succeed."""
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            _build_fault_injection_route_rule(
                abort_percentage=0, delay_percentage=0
            )
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 1)
        # Per-route fault filter config from the dumped RDS.
        filter_config = xds_config.rds["virtualHosts"][0]["routes"][0][
            "typedPerFilterConfig"
        ]["envoy.filters.http.fault"]
        self.assertEqual("20s", filter_config["delay"]["fixedDelay"])
        # A 0 numerator may be omitted from the dump, hence .get with default.
        self.assertEqual(
            0, filter_config["delay"]["percentage"].get("numerator", 0)
        )
        self.assertEqual(
            "MILLION", filter_config["delay"]["percentage"]["denominator"]
        )
        self.assertEqual(401, filter_config["abort"]["httpStatus"])
        self.assertEqual(
            0, filter_config["abort"]["percentage"].get("numerator", 0)
        )
        self.assertEqual(
            "MILLION", filter_config["abort"]["percentage"]["denominator"]
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        # With 0% fault injection, expect ~100% OK within tight tolerance.
        self.configure_and_send(
            test_client, rpc_types=(RpcTypeUnaryCall,), num_rpcs=_NUM_RPCS
        )
        self.assertRpcStatusCode(
            test_client,
            expected=(
                ExpectedResult(
                    rpc_type=RpcTypeUnaryCall,
                    status_code=grpc.StatusCode.OK,
                    ratio=1,
                ),
            ),
            length=_LENGTH_OF_RPC_SENDING_SEC,
            tolerance=_NON_RANDOM_ERROR_TOLERANCE,
        )
class TestNonMatchingFaultInjection(xds_url_map_testcase.XdsUrlMapTestCase):
    """EMPTY_CALL is not fault injected, so it should succeed.

    Only the UnaryCall route carries a 100% abort + 100% delay policy;
    EmptyCall falls through to the untouched default route.
    """
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)
    @staticmethod
    def client_init_config(rpc: str, metadata: str):
        # Python interop client will stuck if the traffic is slow (in this case,
        # 20s injected). The purpose of this test is examining the un-injected
        # traffic is not impacted, so it's fine to just send un-injected
        # traffic.
        return "EmptyCall", metadata
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            _build_fault_injection_route_rule(
                abort_percentage=100, delay_percentage=100
            )
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 1)
        # The first route rule for UNARY_CALL is fault injected
        self.assertEqual(
            "/grpc.testing.TestService/UnaryCall",
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["path"],
        )
        filter_config = xds_config.rds["virtualHosts"][0]["routes"][0][
            "typedPerFilterConfig"
        ]["envoy.filters.http.fault"]
        self.assertEqual("20s", filter_config["delay"]["fixedDelay"])
        # 100% expressed as 1,000,000 / MILLION.
        self.assertEqual(
            1000000, filter_config["delay"]["percentage"]["numerator"]
        )
        self.assertEqual(
            "MILLION", filter_config["delay"]["percentage"]["denominator"]
        )
        self.assertEqual(401, filter_config["abort"]["httpStatus"])
        self.assertEqual(
            1000000, filter_config["abort"]["percentage"]["numerator"]
        )
        self.assertEqual(
            "MILLION", filter_config["abort"]["percentage"]["denominator"]
        )
        # The second route rule for all other RPCs is untouched
        self.assertNotIn(
            "envoy.filters.http.fault",
            xds_config.rds["virtualHosts"][0]["routes"][1].get(
                "typedPerFilterConfig", {}
            ),
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        # EmptyCall bypasses the fault-injected route, so it must be ~100% OK.
        self.assertRpcStatusCode(
            test_client,
            expected=(
                ExpectedResult(
                    rpc_type=RpcTypeEmptyCall,
                    status_code=grpc.StatusCode.OK,
                    ratio=1,
                ),
            ),
            length=_LENGTH_OF_RPC_SENDING_SEC,
            tolerance=_NON_RANDOM_ERROR_TOLERANCE,
        )
# Skipped: flaky — see reason string. Kept for when the root cause is found.
@absltest.skip("20% RPC might pass immediately, reason unknown")
class TestAlwaysDelay(xds_url_map_testcase.XdsUrlMapTestCase):
    """100% delay injection: every UnaryCall should exceed the 1s app timeout."""
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            _build_fault_injection_route_rule(
                abort_percentage=0, delay_percentage=100
            )
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 1)
        filter_config = xds_config.rds["virtualHosts"][0]["routes"][0][
            "typedPerFilterConfig"
        ]["envoy.filters.http.fault"]
        self.assertEqual("20s", filter_config["delay"]["fixedDelay"])
        self.assertEqual(
            1000000, filter_config["delay"]["percentage"]["numerator"]
        )
        self.assertEqual(
            "MILLION", filter_config["delay"]["percentage"]["denominator"]
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        self.configure_and_send(
            test_client,
            rpc_types=(RpcTypeUnaryCall,),
            num_rpcs=_NUM_RPCS,
            app_timeout=_DELAY_CASE_APPLICATION_TIMEOUT_SEC,
        )
        # Drain any startup backlog so lingering delayed RPCs don't skew stats.
        _wait_until_backlog_cleared(test_client)
        self.assertRpcStatusCode(
            test_client,
            expected=(
                ExpectedResult(
                    rpc_type=RpcTypeUnaryCall,
                    status_code=grpc.StatusCode.DEADLINE_EXCEEDED,
                    ratio=1,
                ),
            ),
            length=_LENGTH_OF_RPC_SENDING_SEC,
            tolerance=_NON_RANDOM_ERROR_TOLERANCE,
        )
class TestAlwaysAbort(xds_url_map_testcase.XdsUrlMapTestCase):
    """100% abort with HTTP 401: every UnaryCall must fail UNAUTHENTICATED."""
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            _build_fault_injection_route_rule(
                abort_percentage=100, delay_percentage=0
            )
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 1)
        filter_config = xds_config.rds["virtualHosts"][0]["routes"][0][
            "typedPerFilterConfig"
        ]["envoy.filters.http.fault"]
        self.assertEqual(401, filter_config["abort"]["httpStatus"])
        self.assertEqual(
            1000000, filter_config["abort"]["percentage"]["numerator"]
        )
        self.assertEqual(
            "MILLION", filter_config["abort"]["percentage"]["denominator"]
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        self.configure_and_send(
            test_client, rpc_types=(RpcTypeUnaryCall,), num_rpcs=_NUM_RPCS
        )
        # HTTP 401 maps to gRPC UNAUTHENTICATED on the client.
        self.assertRpcStatusCode(
            test_client,
            expected=(
                ExpectedResult(
                    rpc_type=RpcTypeUnaryCall,
                    status_code=grpc.StatusCode.UNAUTHENTICATED,
                    ratio=1,
                ),
            ),
            length=_LENGTH_OF_RPC_SENDING_SEC,
            tolerance=_NON_RANDOM_ERROR_TOLERANCE,
        )
class TestDelayHalf(xds_url_map_testcase.XdsUrlMapTestCase):
    """50% delay injection: about half of UnaryCalls should hit the deadline.

    Uses the loose _ERROR_TOLERANCE because the 50% split is random.
    """
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            _build_fault_injection_route_rule(
                abort_percentage=0, delay_percentage=50
            )
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 1)
        filter_config = xds_config.rds["virtualHosts"][0]["routes"][0][
            "typedPerFilterConfig"
        ]["envoy.filters.http.fault"]
        self.assertEqual("20s", filter_config["delay"]["fixedDelay"])
        # 50% expressed as 500,000 / MILLION.
        self.assertEqual(
            500000, filter_config["delay"]["percentage"]["numerator"]
        )
        self.assertEqual(
            "MILLION", filter_config["delay"]["percentage"]["denominator"]
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        self.configure_and_send(
            test_client,
            rpc_types=(RpcTypeUnaryCall,),
            num_rpcs=_NUM_RPCS,
            app_timeout=_DELAY_CASE_APPLICATION_TIMEOUT_SEC,
        )
        # Drain any startup backlog so lingering delayed RPCs don't skew stats.
        _wait_until_backlog_cleared(test_client)
        self.assertRpcStatusCode(
            test_client,
            expected=(
                ExpectedResult(
                    rpc_type=RpcTypeUnaryCall,
                    status_code=grpc.StatusCode.DEADLINE_EXCEEDED,
                    ratio=0.5,
                ),
            ),
            length=_LENGTH_OF_RPC_SENDING_SEC,
            tolerance=_ERROR_TOLERANCE,
        )
class TestAbortHalf(xds_url_map_testcase.XdsUrlMapTestCase):
    """50% abort injection: about half of UnaryCalls fail UNAUTHENTICATED.

    Uses the loose _ERROR_TOLERANCE because the 50% split is random.
    """
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            _build_fault_injection_route_rule(
                abort_percentage=50, delay_percentage=0
            )
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 1)
        filter_config = xds_config.rds["virtualHosts"][0]["routes"][0][
            "typedPerFilterConfig"
        ]["envoy.filters.http.fault"]
        self.assertEqual(401, filter_config["abort"]["httpStatus"])
        # 50% expressed as 500,000 / MILLION.
        self.assertEqual(
            500000, filter_config["abort"]["percentage"]["numerator"]
        )
        self.assertEqual(
            "MILLION", filter_config["abort"]["percentage"]["denominator"]
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        self.configure_and_send(
            test_client, rpc_types=(RpcTypeUnaryCall,), num_rpcs=_NUM_RPCS
        )
        self.assertRpcStatusCode(
            test_client,
            expected=(
                ExpectedResult(
                    rpc_type=RpcTypeUnaryCall,
                    status_code=grpc.StatusCode.UNAUTHENTICATED,
                    ratio=0.5,
                ),
            ),
            length=_LENGTH_OF_RPC_SENDING_SEC,
            tolerance=_ERROR_TOLERANCE,
        )
if __name__ == "__main__":
absltest.main()
| 16,732
| 35.376087
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/url_map/path_matching_test.py
|
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Tuple
from absl import flags
from absl.testing import absltest
from framework import xds_url_map_testcase
from framework.helpers import skips
from framework.test_app import client_app
# Type aliases
HostRule = xds_url_map_testcase.HostRule
PathMatcher = xds_url_map_testcase.PathMatcher
GcpResourceManager = xds_url_map_testcase.GcpResourceManager
DumpedXdsConfig = xds_url_map_testcase.DumpedXdsConfig
RpcTypeUnaryCall = xds_url_map_testcase.RpcTypeUnaryCall
RpcTypeEmptyCall = xds_url_map_testcase.RpcTypeEmptyCall
XdsTestClient = client_app.XdsTestClient
_Lang = skips.Lang
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_url_map_testcase)
_NUM_RPCS = 150
def _is_supported(config: skips.TestConfig) -> bool:
    """Path matching needs Node >= v1.3.x; all other languages qualify."""
    if config.client_lang != _Lang.NODE:
        return True
    return config.version_gte("v1.3.x")
class TestFullPathMatchEmptyCall(xds_url_map_testcase.XdsUrlMapTestCase):
    """fullPathMatch on EmptyCall routes it to the alternative backend."""
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            {
                "priority": 0,
                # FullPath EmptyCall -> alternate_backend_service.
                "matchRules": [
                    {"fullPathMatch": "/grpc.testing.TestService/EmptyCall"}
                ],
                "service": GcpResourceManager().alternative_backend_service(),
            }
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        # The fullPathMatch must surface as an exact "path" match in RDS.
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["path"],
            "/grpc.testing.TestService/EmptyCall",
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        rpc_distribution = self.configure_and_send(
            test_client, rpc_types=[RpcTypeEmptyCall], num_rpcs=_NUM_RPCS
        )
        self.assertEqual(
            _NUM_RPCS, rpc_distribution.empty_call_alternative_service_rpc_count
        )
class TestFullPathMatchUnaryCall(xds_url_map_testcase.XdsUrlMapTestCase):
    """fullPathMatch on UnaryCall routes it to the alternative backend."""
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            {
                "priority": 0,
                # FullPath UnaryCall -> alternate_backend_service.
                "matchRules": [
                    {"fullPathMatch": "/grpc.testing.TestService/UnaryCall"}
                ],
                "service": GcpResourceManager().alternative_backend_service(),
            }
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["path"],
            "/grpc.testing.TestService/UnaryCall",
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        rpc_distribution = self.configure_and_send(
            test_client, rpc_types=(RpcTypeUnaryCall,), num_rpcs=_NUM_RPCS
        )
        self.assertEqual(
            _NUM_RPCS, rpc_distribution.unary_call_alternative_service_rpc_count
        )
class TestTwoRoutesAndPrefixMatch(xds_url_map_testcase.XdsUrlMapTestCase):
    """This test case is similar to the one above (but with route services
    swapped). This test has two routes (full_path and the default) to match
    EmptyCall, and both routes set alternative_backend_service as the action.
    This forces the client to handle duplicate Clusters in the RDS response."""
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            {
                "priority": 0,
                # Prefix UnaryCall -> default_backend_service.
                "matchRules": [
                    {"prefixMatch": "/grpc.testing.TestService/Unary"}
                ],
                "service": GcpResourceManager().default_backend_service(),
            },
            {
                "priority": 1,
                # FullPath EmptyCall -> alternate_backend_service.
                "matchRules": [
                    {"fullPathMatch": "/grpc.testing.TestService/EmptyCall"}
                ],
                "service": GcpResourceManager().alternative_backend_service(),
            },
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        # Both rules must appear in RDS in priority order: prefix first,
        # then the exact path match.
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["prefix"],
            "/grpc.testing.TestService/Unary",
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][1]["match"]["path"],
            "/grpc.testing.TestService/EmptyCall",
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        # UnaryCall must stay off the alternative service and EmptyCall off
        # the default service; no RPC may fail.
        rpc_distribution = self.configure_and_send(
            test_client,
            rpc_types=[RpcTypeUnaryCall, RpcTypeEmptyCall],
            num_rpcs=_NUM_RPCS,
        )
        self.assertEqual(0, rpc_distribution.num_failures)
        self.assertEqual(
            0, rpc_distribution.unary_call_alternative_service_rpc_count
        )
        self.assertEqual(
            0, rpc_distribution.empty_call_default_service_rpc_count
        )
class TestRegexMatch(xds_url_map_testcase.XdsUrlMapTestCase):
    """Path regexMatch routes any service's UnaryCall to the alternative
    backend."""
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            {
                "priority": 0,
                # Regex UnaryCall -> alternate_backend_service.
                "matchRules": [
                    {
                        "regexMatch": (  # Unary methods with any services.
                            r"^\/.*\/UnaryCall$"
                        )
                    }
                ],
                "service": GcpResourceManager().alternative_backend_service(),
            }
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        # GCP regexMatch surfaces as a "safeRegex" path matcher in RDS.
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"][
                "safeRegex"
            ]["regex"],
            r"^\/.*\/UnaryCall$",
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        rpc_distribution = self.configure_and_send(
            test_client, rpc_types=(RpcTypeUnaryCall,), num_rpcs=_NUM_RPCS
        )
        self.assertEqual(
            _NUM_RPCS, rpc_distribution.unary_call_alternative_service_rpc_count
        )
class TestCaseInsensitiveMatch(xds_url_map_testcase.XdsUrlMapTestCase):
    """ignoreCase fullPathMatch: a mixed-case path still matches EmptyCall."""
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            {
                "priority": 0,
                # ignoreCase EmptyCall -> alternate_backend_service.
                "matchRules": [
                    {
                        # Case insensitive matching.
                        "fullPathMatch": "/gRpC.tEsTinG.tEstseRvice/empTycaLl",
                        "ignoreCase": True,
                    }
                ],
                "service": GcpResourceManager().alternative_backend_service(),
            }
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        # GCP's ignoreCase=True surfaces in RDS as caseSensitive=False.
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"]["path"],
            "/gRpC.tEsTinG.tEstseRvice/empTycaLl",
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["match"][
                "caseSensitive"
            ],
            False,
        )
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        rpc_distribution = self.configure_and_send(
            test_client, rpc_types=[RpcTypeEmptyCall], num_rpcs=_NUM_RPCS
        )
        self.assertEqual(
            _NUM_RPCS, rpc_distribution.empty_call_alternative_service_rpc_count
        )
if __name__ == "__main__":
absltest.main()
| 9,665
| 34.536765
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/url_map/timeout_test.py
|
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Tuple
import unittest
from absl import flags
from absl.testing import absltest
import grpc
from framework import xds_url_map_testcase
from framework.helpers import skips
from framework.test_app import client_app
# Type aliases
HostRule = xds_url_map_testcase.HostRule
PathMatcher = xds_url_map_testcase.PathMatcher
GcpResourceManager = xds_url_map_testcase.GcpResourceManager
DumpedXdsConfig = xds_url_map_testcase.DumpedXdsConfig
RpcTypeUnaryCall = xds_url_map_testcase.RpcTypeUnaryCall
RpcTypeEmptyCall = xds_url_map_testcase.RpcTypeEmptyCall
ExpectedResult = xds_url_map_testcase.ExpectedResult
XdsTestClient = client_app.XdsTestClient
XdsUrlMapTestCase = xds_url_map_testcase.XdsUrlMapTestCase
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_url_map_testcase)
# The first batch of RPCs don't count towards the result of test case. They are
# meant to prove the communication between driver and client is fine.
_NUM_RPCS = 25
_LENGTH_OF_RPC_SENDING_SEC = 10
_ERROR_TOLERANCE = 0.1
class _BaseXdsTimeOutTestCase(XdsUrlMapTestCase):
    """Shared fixture: routes UnaryCall with a 3s maxStreamDuration.

    Subclasses override rpc_distribution_validate to assert the effect of
    the timeout under different client behaviors.
    """
    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [
            {
                "priority": 0,
                "matchRules": [
                    {"fullPathMatch": "/grpc.testing.TestService/UnaryCall"}
                ],
                "service": GcpResourceManager().default_backend_service(),
                "routeAction": {
                    "maxStreamDuration": {
                        "seconds": 3,
                    },
                },
            }
        ]
        return host_rule, path_matcher
    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        # The 3s duration must appear both as the stream cap and as the
        # grpc-timeout header cap in the dumped route.
        self.assertNumEndpoints(xds_config, 1)
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["route"][
                "maxStreamDuration"
            ]["maxStreamDuration"],
            "3s",
        )
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["route"][
                "maxStreamDuration"
            ]["grpcTimeoutHeaderMax"],
            "3s",
        )
    def rpc_distribution_validate(self, unused_test_client):
        # Abstract: concrete timeout scenarios are defined by subclasses.
        raise NotImplementedError()
class TestTimeoutInRouteRule(_BaseXdsTimeOutTestCase):
    """Server sleeps 4s > 3s route timeout: UnaryCall times out, EmptyCall
    (not covered by the route rule) succeeds."""
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        # TODO(lidiz) either add support for rpc-behavior to other languages, or we
        # should always use Java server as backend.
        if config.server_lang != "java":
            return False
        if config.client_lang == skips.Lang.NODE:
            return config.version_gte("v1.4.x")
        return True
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        self.configure_and_send(
            test_client,
            rpc_types=[RpcTypeUnaryCall, RpcTypeEmptyCall],
            # UnaryCall and EmptyCall both sleep-4.
            # UnaryCall timeouts, EmptyCall succeeds.
            metadata=(
                (RpcTypeUnaryCall, "rpc-behavior", "sleep-4"),
                (RpcTypeEmptyCall, "rpc-behavior", "sleep-4"),
            ),
            num_rpcs=_NUM_RPCS,
        )
        self.assertRpcStatusCode(
            test_client,
            expected=(
                ExpectedResult(
                    rpc_type=RpcTypeUnaryCall,
                    status_code=grpc.StatusCode.DEADLINE_EXCEEDED,
                ),
                ExpectedResult(
                    rpc_type=RpcTypeEmptyCall, status_code=grpc.StatusCode.OK
                ),
            ),
            length=_LENGTH_OF_RPC_SENDING_SEC,
            tolerance=_ERROR_TOLERANCE,
        )
class TestTimeoutInApplication(_BaseXdsTimeOutTestCase):
    """Application deadline (1s) is shorter than the 3s route timeout and the
    2s server sleep, so the application deadline wins: UnaryCall times out."""
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        # TODO(lidiz) either add support for rpc-behavior to other languages, or we
        # should always use Java server as backend.
        if config.server_lang != "java":
            return False
        if config.client_lang == skips.Lang.NODE:
            return config.version_gte("v1.4.x")
        return True
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        self.configure_and_send(
            test_client,
            rpc_types=(RpcTypeUnaryCall,),
            # UnaryCall only with sleep-2; timeout=1s; calls timeout.
            metadata=((RpcTypeUnaryCall, "rpc-behavior", "sleep-2"),),
            app_timeout=1,
            num_rpcs=_NUM_RPCS,
        )
        self.assertRpcStatusCode(
            test_client,
            expected=(
                ExpectedResult(
                    rpc_type=RpcTypeUnaryCall,
                    status_code=grpc.StatusCode.DEADLINE_EXCEEDED,
                ),
            ),
            length=_LENGTH_OF_RPC_SENDING_SEC,
            tolerance=_ERROR_TOLERANCE,
        )
class TestTimeoutNotExceeded(_BaseXdsTimeOutTestCase):
    """No server sleep: UnaryCall finishes well within the 3s timeout."""
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        if config.client_lang == skips.Lang.NODE:
            return config.version_gte("v1.4.x")
        return True
    def rpc_distribution_validate(self, test_client: XdsTestClient):
        self.configure_and_send(
            test_client,
            # UnaryCall only with no sleep; calls succeed.
            rpc_types=(RpcTypeUnaryCall,),
            num_rpcs=_NUM_RPCS,
        )
        self.assertRpcStatusCode(
            test_client,
            expected=(
                ExpectedResult(
                    rpc_type=RpcTypeUnaryCall, status_code=grpc.StatusCode.OK
                ),
            ),
            length=_LENGTH_OF_RPC_SENDING_SEC,
            tolerance=_ERROR_TOLERANCE,
        )
def load_tests(loader: absltest.TestLoader, unused_tests, unused_pattern):
    """Implements the unittest load_tests protocol: enumerates this module's
    test classes explicitly so they run in a fixed, known order."""
    suite = unittest.TestSuite()
    for test_class in (
        TestTimeoutInRouteRule,
        TestTimeoutInApplication,
        TestTimeoutNotExceeded,
    ):
        suite.addTests(loader.loadTestsFromTestCase(test_class))
    return suite
if __name__ == "__main__":
absltest.main()
| 6,818
| 33.095
| 83
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/url_map/retry_test.py
|
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Tuple
from absl import flags
from absl.testing import absltest
import grpc
from framework import xds_url_map_testcase
from framework.helpers import skips
from framework.test_app import client_app
# Type aliases
HostRule = xds_url_map_testcase.HostRule
PathMatcher = xds_url_map_testcase.PathMatcher
GcpResourceManager = xds_url_map_testcase.GcpResourceManager
DumpedXdsConfig = xds_url_map_testcase.DumpedXdsConfig
RpcTypeUnaryCall = xds_url_map_testcase.RpcTypeUnaryCall
XdsTestClient = client_app.XdsTestClient
ExpectedResult = xds_url_map_testcase.ExpectedResult
_Lang = skips.Lang
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_url_map_testcase)
# The first batch of RPCs don't count towards the result of test case. They are
# meant to prove the communication between driver and client is fine.
_NUM_RPCS = 10
# Seconds passed as `length=` to assertRpcStatusCode: the window during which
# the client keeps sending RPCs for each assertion.
_LENGTH_OF_RPC_SENDING_SEC = 16
# We are using sleep to synchronize test driver and the client... Even though
# the client is sending at QPS rate, we can't assert that exactly QPS *
# SLEEP_DURATION number of RPC is finished. The final completed RPC might be
# slightly more or less.
_NON_RANDOM_ERROR_TOLERANCE = 0.01
# Metadata key understood by the xds interop test server; the tests below send
# values like "succeed-on-retry-attempt-4,error-code-14" through it.
_RPC_BEHAVIOR_HEADER_NAME = "rpc-behavior"
def _build_retry_route_rule(retryConditions, num_retries):
    """Composes a URL-map route rule that retries UnaryCall RPCs.

    Args:
        retryConditions: list of gRPC retry conditions (e.g. ["unavailable"]).
        num_retries: maximum number of retry attempts.
    """
    retry_policy = {
        "retryConditions": retryConditions,
        "numRetries": num_retries,
    }
    return {
        "priority": 0,
        "matchRules": [
            {"fullPathMatch": "/grpc.testing.TestService/UnaryCall"}
        ],
        "service": GcpResourceManager().default_backend_service(),
        "routeAction": {"retryPolicy": retry_policy},
    }
def _is_supported(config: skips.TestConfig) -> bool:
    """Returns whether the client under test supports xDS retry.

    Per "Retry" in
    https://github.com/grpc/grpc/blob/master/doc/grpc_xds_features.md
    """
    lang = config.client_lang
    if lang in _Lang.CPP | _Lang.JAVA | _Lang.PYTHON:
        min_version = "v1.40.x"
    elif lang == _Lang.GO:
        min_version = "v1.41.x"
    elif lang == _Lang.NODE:
        min_version = "v1.8.x"
    else:
        # No known minimum version: assume supported.
        return True
    return config.version_gte(min_version)
class TestRetryUpTo3AttemptsAndFail(xds_url_map_testcase.XdsUrlMapTestCase):
    """Retry policy allows 3 attempts, but the server only succeeds on
    attempt 4 — every RPC must exhaust its retries and fail UNAVAILABLE.
    """

    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)

    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        # Route UnaryCall through a retry policy: retry on "unavailable",
        # at most 3 attempts.
        path_matcher["routeRules"] = [
            _build_retry_route_rule(
                retryConditions=["unavailable"], num_retries=3
            )
        ]
        return host_rule, path_matcher

    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 1)
        # The retry policy must have been propagated to the client via RDS.
        retry_config = xds_config.rds["virtualHosts"][0]["routes"][0]["route"][
            "retryPolicy"
        ]
        self.assertEqual(3, retry_config["numRetries"])
        self.assertEqual("unavailable", retry_config["retryOn"])

    def rpc_distribution_validate(self, test_client: XdsTestClient):
        self.configure_and_send(
            test_client,
            rpc_types=(RpcTypeUnaryCall,),
            # The server fails each attempt with code 14 (UNAVAILABLE) and
            # would succeed only on attempt 4 — one more than the policy
            # allows, so the client never sees a success.
            metadata=[
                (
                    RpcTypeUnaryCall,
                    _RPC_BEHAVIOR_HEADER_NAME,
                    "succeed-on-retry-attempt-4,error-code-14",
                )
            ],
            num_rpcs=_NUM_RPCS,
        )
        self.assertRpcStatusCode(
            test_client,
            # ratio=1: all observed RPCs should end with UNAVAILABLE.
            expected=(
                ExpectedResult(
                    rpc_type=RpcTypeUnaryCall,
                    status_code=grpc.StatusCode.UNAVAILABLE,
                    ratio=1,
                ),
            ),
            length=_LENGTH_OF_RPC_SENDING_SEC,
            tolerance=_NON_RANDOM_ERROR_TOLERANCE,
        )
class TestRetryUpTo4AttemptsAndSucceed(xds_url_map_testcase.XdsUrlMapTestCase):
    """Retry policy allows 4 attempts and the server succeeds on attempt 4 —
    every RPC must eventually complete with OK.
    """

    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)

    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        # Same route rule as the failing variant, but with 4 retries, which
        # is exactly enough to reach the succeeding attempt.
        path_matcher["routeRules"] = [
            _build_retry_route_rule(
                retryConditions=["unavailable"], num_retries=4
            )
        ]
        return host_rule, path_matcher

    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 1)
        # The retry policy must have been propagated to the client via RDS.
        retry_config = xds_config.rds["virtualHosts"][0]["routes"][0]["route"][
            "retryPolicy"
        ]
        self.assertEqual(4, retry_config["numRetries"])
        self.assertEqual("unavailable", retry_config["retryOn"])

    def rpc_distribution_validate(self, test_client: XdsTestClient):
        self.configure_and_send(
            test_client,
            rpc_types=(RpcTypeUnaryCall,),
            # Server fails attempts 1-3 with UNAVAILABLE (code 14) and
            # succeeds on attempt 4 — within the configured retry budget.
            metadata=[
                (
                    RpcTypeUnaryCall,
                    _RPC_BEHAVIOR_HEADER_NAME,
                    "succeed-on-retry-attempt-4,error-code-14",
                )
            ],
            num_rpcs=_NUM_RPCS,
        )
        self.assertRpcStatusCode(
            test_client,
            # ratio=1: all observed RPCs should end with OK.
            expected=(
                ExpectedResult(
                    rpc_type=RpcTypeUnaryCall,
                    status_code=grpc.StatusCode.OK,
                    ratio=1,
                ),
            ),
            length=_LENGTH_OF_RPC_SENDING_SEC,
            tolerance=_NON_RANDOM_ERROR_TOLERANCE,
        )
# Standard absl test entry point.
if __name__ == "__main__":
    absltest.main()
| 6,174
| 32.928571
| 79
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/url_map/affinity_test.py
|
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Tuple
from absl import flags
from absl.testing import absltest
from framework import xds_url_map_testcase
from framework.helpers import skips
from framework.infrastructure import traffic_director
from framework.rpc import grpc_channelz
from framework.test_app import client_app
# Type aliases
HostRule = xds_url_map_testcase.HostRule
PathMatcher = xds_url_map_testcase.PathMatcher
GcpResourceManager = xds_url_map_testcase.GcpResourceManager
DumpedXdsConfig = xds_url_map_testcase.DumpedXdsConfig
RpcTypeUnaryCall = xds_url_map_testcase.RpcTypeUnaryCall
RpcTypeEmptyCall = xds_url_map_testcase.RpcTypeEmptyCall
XdsTestClient = client_app.XdsTestClient
_Lang = skips.Lang
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_url_map_testcase)
_NUM_RPCS = 150
# Header key/values used to drive RING_HASH header-based session affinity.
_TEST_METADATA_KEY = traffic_director.TEST_AFFINITY_METADATA_KEY
_TEST_METADATA_VALUE_UNARY = "unary_yranu"
_TEST_METADATA_VALUE_EMPTY = "empty_ytpme"
_TEST_METADATA_NUMERIC_KEY = "xds_md_numeric"
_TEST_METADATA_NUMERIC_VALUE = "159"
# (rpc_type, header_key, header_value) triples attached to outgoing test RPCs.
_TEST_METADATA = (
    (RpcTypeUnaryCall, _TEST_METADATA_KEY, _TEST_METADATA_VALUE_UNARY),
    (RpcTypeEmptyCall, _TEST_METADATA_KEY, _TEST_METADATA_VALUE_EMPTY),
    (
        RpcTypeUnaryCall,
        _TEST_METADATA_NUMERIC_KEY,
        _TEST_METADATA_NUMERIC_VALUE,
    ),
)
# Short alias for the channelz channel-state enum.
_ChannelzChannelState = grpc_channelz.ChannelState
def _is_supported(config: skips.TestConfig) -> bool:
    """Returns whether the client under test supports ring-hash affinity.

    Per "Ring hash" in
    https://github.com/grpc/grpc/blob/master/doc/grpc_xds_features.md
    """
    lang = config.client_lang
    if lang in _Lang.PYTHON | _Lang.NODE:
        # Python: TODO(https://github.com/grpc/grpc/issues/27430): supported
        # after the issue is fixed. Node: not supported.
        return False
    if lang in _Lang.CPP | _Lang.JAVA:
        return config.version_gte("v1.40.x")
    if lang == _Lang.GO:
        return config.version_gte("v1.41.x")
    return True
class TestHeaderBasedAffinity(xds_url_map_testcase.XdsUrlMapTestCase):
    """With RING_HASH and a header hash policy, RPCs carrying the affinity
    header must stick to a single backend; header-less RPCs spread out.
    """

    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)

    @staticmethod
    def client_init_config(rpc: str, metadata: str):
        # Config the init RPCs to send with the same set of metadata. Without
        # this, the init RPCs will not have headers, and will pick random
        # backends (behavior of RING_HASH). This is necessary so that only one
        # sub-channel is picked and used from the beginning, thus the channel
        # will only create this one sub-channel.
        return "EmptyCall", "EmptyCall:%s:%s" % (
            _TEST_METADATA_KEY,
            _TEST_METADATA_VALUE_EMPTY,
        )

    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        # Update default service to the affinity service.
        path_matcher[
            "defaultService"
        ] = GcpResourceManager().affinity_backend_service()
        return host_rule, path_matcher

    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        # 3 endpoints in the affinity backend service.
        self.assertNumEndpoints(xds_config, 3)
        # The route's hash policy must hash on the affinity header, and the
        # cluster must use RING_HASH load balancing.
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["route"][
                "hashPolicy"
            ][0]["header"]["headerName"],
            _TEST_METADATA_KEY,
        )
        self.assertEqual(xds_config.cds[0]["lbPolicy"], "RING_HASH")

    def rpc_distribution_validate(self, test_client: XdsTestClient):
        # Phase 1: all RPCs carry the affinity header.
        rpc_distribution = self.configure_and_send(
            test_client,
            rpc_types=[RpcTypeEmptyCall],
            metadata=_TEST_METADATA,
            num_rpcs=_NUM_RPCS,
        )
        # Only one backend should receive traffic, even though there are 3
        # backends.
        self.assertEqual(1, rpc_distribution.num_peers)
        self.assertLen(
            test_client.find_subchannels_with_state(
                _ChannelzChannelState.READY
            ),
            1,
        )
        # Any remaining subchannels may be in any state.
        # Phase 2:
        # Send 150 RPCs without headers. RPCs without headers will pick random
        # backends. After this, we expect to see all backends to be connected.
        rpc_distribution = self.configure_and_send(
            test_client,
            rpc_types=[RpcTypeEmptyCall, RpcTypeUnaryCall],
            num_rpcs=_NUM_RPCS,
        )
        self.assertEqual(3, rpc_distribution.num_peers)
        self.assertLen(
            test_client.find_subchannels_with_state(
                _ChannelzChannelState.READY
            ),
            3,
        )
class TestHeaderBasedAffinityMultipleHeaders(
    xds_url_map_testcase.XdsUrlMapTestCase
):
    """Different affinity-header values must (with high probability) hash to
    different backends under RING_HASH.
    """

    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        return _is_supported(config)

    @staticmethod
    def client_init_config(rpc: str, metadata: str):
        # Config the init RPCs to send with the same set of metadata. Without
        # this, the init RPCs will not have headers, and will pick random
        # backends (behavior of RING_HASH). This is necessary so that only one
        # sub-channel is picked and used from the beginning, thus the channel
        # will only create this one sub-channel.
        return "EmptyCall", "EmptyCall:%s:%s" % (
            _TEST_METADATA_KEY,
            _TEST_METADATA_VALUE_EMPTY,
        )

    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        # Update default service to the affinity service.
        path_matcher[
            "defaultService"
        ] = GcpResourceManager().affinity_backend_service()
        return host_rule, path_matcher

    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        # 3 endpoints in the affinity backend service.
        self.assertNumEndpoints(xds_config, 3)
        # The route's hash policy must hash on the affinity header, and the
        # cluster must use RING_HASH load balancing.
        self.assertEqual(
            xds_config.rds["virtualHosts"][0]["routes"][0]["route"][
                "hashPolicy"
            ][0]["header"]["headerName"],
            _TEST_METADATA_KEY,
        )
        self.assertEqual(xds_config.cds[0]["lbPolicy"], "RING_HASH")

    def rpc_distribution_validate(self, test_client: XdsTestClient):
        # Phase 1: all RPCs carry the same affinity header value.
        rpc_distribution = self.configure_and_send(
            test_client,
            rpc_types=[RpcTypeEmptyCall],
            metadata=_TEST_METADATA,
            num_rpcs=_NUM_RPCS,
        )
        # Only one backend should receive traffic, even though there are 3
        # backends.
        self.assertEqual(1, rpc_distribution.num_peers)
        self.assertLen(
            test_client.find_subchannels_with_state(
                _ChannelzChannelState.READY
            ),
            1,
        )
        # Any remaining subchannels may be in any state.
        # Remember which backend served the EmptyCalls.
        empty_call_peer = list(
            rpc_distribution.raw["rpcsByMethod"]["EmptyCall"][
                "rpcsByPeer"
            ].keys()
        )[0]
        # Send RPCs with a different metadata value, try different values to
        # verify that the client will pick a different backend.
        #
        # EmptyCalls will be sent with the same metadata as before, and
        # UnaryCalls will be sent with headers from ["0".."29"]. We check the
        # endpoint picked for UnaryCall, and stop as soon as one different from
        # the EmptyCall peer is picked.
        #
        # Note that there's a small chance all the headers would still pick the
        # same backend used by EmptyCall. But there will be over a thousand
        # nodes on the ring (default min size is 1024), and the probability of
        # picking the same backend should be fairly small.
        different_peer_picked = False
        for i in range(30):
            new_metadata = (
                (
                    RpcTypeEmptyCall,
                    _TEST_METADATA_KEY,
                    _TEST_METADATA_VALUE_EMPTY,
                ),
                (RpcTypeUnaryCall, _TEST_METADATA_KEY, str(i)),
            )
            rpc_distribution = self.configure_and_send(
                test_client,
                rpc_types=[RpcTypeEmptyCall, RpcTypeUnaryCall],
                metadata=new_metadata,
                num_rpcs=_NUM_RPCS,
            )
            unary_call_peer = list(
                rpc_distribution.raw["rpcsByMethod"]["UnaryCall"][
                    "rpcsByPeer"
                ].keys()
            )[0]
            if unary_call_peer != empty_call_peer:
                different_peer_picked = True
                break
        self.assertTrue(
            different_peer_picked,
            (
                "the same endpoint was picked for all the headers, expect a "
                "different endpoint to be picked"
            ),
        )
        # Two distinct hash targets were used, so exactly two subchannels
        # should be READY.
        self.assertLen(
            test_client.find_subchannels_with_state(
                _ChannelzChannelState.READY
            ),
            2,
        )
        # Any remaining subchannels may be in any state.
# TODO: add more test cases
# 1. based on the basic test, turn down the backend in use, then verify that all
# RPCs are sent to another backend
# Standard absl test entry point.
if __name__ == "__main__":
    absltest.main()
| 9,859
| 35.518519
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/url_map/__init__.py
|
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 580
| 40.5
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/tests/url_map/csds_test.py
|
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Tuple
from absl import flags
from absl.testing import absltest
from framework import xds_url_map_testcase
from framework.helpers import skips
from framework.test_app import client_app
# Type aliases
HostRule = xds_url_map_testcase.HostRule
PathMatcher = xds_url_map_testcase.PathMatcher
GcpResourceManager = xds_url_map_testcase.GcpResourceManager
DumpedXdsConfig = xds_url_map_testcase.DumpedXdsConfig
RpcTypeUnaryCall = xds_url_map_testcase.RpcTypeUnaryCall
RpcTypeEmptyCall = xds_url_map_testcase.RpcTypeEmptyCall
XdsTestClient = client_app.XdsTestClient
_Lang = skips.Lang
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_url_map_testcase)
_NUM_RPCS = 50
class TestBasicCsds(xds_url_map_testcase.XdsUrlMapTestCase):
    """Smoke-tests the CSDS dump: node metadata, LDS, RDS, CDS, and EDS
    entries must all be present and consistent with this test client.
    """

    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        # Node supports CSDS starting from v1.5.x; all other languages do.
        if config.client_lang == _Lang.NODE:
            return config.version_gte("v1.5.x")
        return True

    @staticmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        # No url map customization: this test only inspects the CSDS dump.
        return host_rule, path_matcher

    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        # Validate Endpoint Configs
        self.assertNumEndpoints(xds_config, 1)
        # Validate Node
        self.assertEqual(
            self.test_client.ip, xds_config["node"]["metadata"]["INSTANCE_IP"]
        )
        # Validate Listeners
        self.assertIsNotNone(xds_config.lds)
        self.assertEqual(self.hostname(), xds_config.lds["name"])
        # Validate Route Configs
        self.assertTrue(xds_config.rds["virtualHosts"])
        # Validate Clusters
        self.assertEqual(1, len(xds_config.cds))
        self.assertEqual("EDS", xds_config.cds[0]["type"])

    def rpc_distribution_validate(self, test_client: XdsTestClient):
        # Both RPC types should complete; every single RPC must be OK.
        rpc_distribution = self.configure_and_send(
            test_client,
            rpc_types=[RpcTypeUnaryCall, RpcTypeEmptyCall],
            num_rpcs=_NUM_RPCS,
        )
        self.assertEqual(_NUM_RPCS, rpc_distribution.num_oks)
# Standard absl test entry point.
if __name__ == "__main__":
    absltest.main()
| 2,737
| 33.225
| 78
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/bootstrap_generator_testcase.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional
from framework import xds_k8s_testcase
from framework.helpers import rand as helpers_rand
from framework.infrastructure import k8s
from framework.infrastructure import traffic_director
from framework.test_app.runners.k8s import k8s_xds_client_runner
from framework.test_app.runners.k8s import k8s_xds_server_runner
logger = logging.getLogger(__name__)
# Type aliases
TrafficDirectorManager = traffic_director.TrafficDirectorManager
XdsTestServer = xds_k8s_testcase.XdsTestServer
XdsTestClient = xds_k8s_testcase.XdsTestClient
KubernetesServerRunner = k8s_xds_server_runner.KubernetesServerRunner
KubernetesClientRunner = k8s_xds_client_runner.KubernetesClientRunner
class BootstrapGeneratorBaseTest(xds_k8s_testcase.XdsKubernetesBaseTestCase):
    """Common functionality to support testing of bootstrap generator versions
    across gRPC clients and servers."""

    @classmethod
    def setUpClass(cls):
        """Hook method for setting up class fixture before running tests in
        the class.
        """
        super().setUpClass()
        if cls.server_maintenance_port is None:
            cls.server_maintenance_port = (
                KubernetesServerRunner.DEFAULT_MAINTENANCE_PORT
            )
        # Bootstrap generator tests are run as parameterized tests which only
        # perform steps specific to the parameterized version of the bootstrap
        # generator under test.
        #
        # Here, we perform setup steps which are common across client and server
        # side variants of the bootstrap generator test.
        if cls.resource_suffix_randomize:
            cls.resource_suffix = helpers_rand.random_resource_suffix()
        logger.info(
            "Test run resource prefix: %s, suffix: %s",
            cls.resource_prefix,
            cls.resource_suffix,
        )
        # TD Manager
        cls.td = cls.initTrafficDirectorManager()
        # Test namespaces for client and server.
        cls.server_namespace = KubernetesServerRunner.make_namespace_name(
            cls.resource_prefix, cls.resource_suffix
        )
        cls.client_namespace = KubernetesClientRunner.make_namespace_name(
            cls.resource_prefix, cls.resource_suffix
        )
        # Ensures the firewall exist
        if cls.ensure_firewall:
            cls.td.create_firewall_rule(
                allowed_ports=cls.firewall_allowed_ports
            )
        # Randomize xds port, when it's set to 0
        if cls.server_xds_port == 0:
            # TODO(sergiitk): this is prone to race conditions:
            # The port might not me taken now, but there's not guarantee
            # it won't be taken until the tests get to creating
            # forwarding rule. This check is better than nothing,
            # but we should find a better approach.
            cls.server_xds_port = cls.td.find_unused_forwarding_rule_port()
            logger.info("Found unused xds port: %s", cls.server_xds_port)
        # Common TD resources across client and server tests.
        cls.td.setup_for_grpc(
            cls.server_xds_host,
            cls.server_xds_port,
            health_check_port=cls.server_maintenance_port,
        )

    @classmethod
    def tearDownClass(cls):
        # Clean up the shared Traffic Director resources created in
        # setUpClass before the base class tear-down.
        cls.td.cleanup(force=cls.force_cleanup)
        super().tearDownClass()

    @classmethod
    def initTrafficDirectorManager(cls) -> TrafficDirectorManager:
        """Creates the TD manager from class-level (flag-derived) settings."""
        return TrafficDirectorManager(
            cls.gcp_api_manager,
            project=cls.project,
            resource_prefix=cls.resource_prefix,
            resource_suffix=cls.resource_suffix,
            network=cls.network,
            compute_api_version=cls.compute_api_version,
        )

    @classmethod
    def initKubernetesServerRunner(
        cls, *, td_bootstrap_image: Optional[str] = None
    ) -> KubernetesServerRunner:
        """Creates a server runner, optionally overriding the bootstrap
        generator image under test. Falls back to the class default image.
        """
        if not td_bootstrap_image:
            td_bootstrap_image = cls.td_bootstrap_image
        return KubernetesServerRunner(
            k8s.KubernetesNamespace(cls.k8s_api_manager, cls.server_namespace),
            deployment_name=cls.server_name,
            image_name=cls.server_image,
            td_bootstrap_image=td_bootstrap_image,
            gcp_project=cls.project,
            gcp_api_manager=cls.gcp_api_manager,
            gcp_service_account=cls.gcp_service_account,
            xds_server_uri=cls.xds_server_uri,
            network=cls.network,
            debug_use_port_forwarding=cls.debug_use_port_forwarding,
            enable_workload_identity=cls.enable_workload_identity,
        )

    @staticmethod
    def startTestServer(
        server_runner,
        port,
        maintenance_port,
        xds_host,
        xds_port,
        replica_count=1,
        **kwargs,
    ) -> XdsTestServer:
        """Runs the server deployment and points the first replica at the
        given xDS host:port."""
        test_server = server_runner.run(
            replica_count=replica_count,
            test_port=port,
            maintenance_port=maintenance_port,
            **kwargs,
        )[0]
        test_server.set_xds_address(xds_host, xds_port)
        return test_server

    def initKubernetesClientRunner(
        self, td_bootstrap_image: Optional[str] = None
    ) -> KubernetesClientRunner:
        """Creates a client runner, optionally overriding the bootstrap
        generator image under test. Falls back to the instance default image.
        """
        if not td_bootstrap_image:
            td_bootstrap_image = self.td_bootstrap_image
        return KubernetesClientRunner(
            k8s.KubernetesNamespace(
                self.k8s_api_manager, self.client_namespace
            ),
            deployment_name=self.client_name,
            image_name=self.client_image,
            td_bootstrap_image=td_bootstrap_image,
            gcp_project=self.project,
            gcp_api_manager=self.gcp_api_manager,
            gcp_service_account=self.gcp_service_account,
            xds_server_uri=self.xds_server_uri,
            network=self.network,
            debug_use_port_forwarding=self.debug_use_port_forwarding,
            enable_workload_identity=self.enable_workload_identity,
            stats_port=self.client_port,
            reuse_namespace=self.server_namespace == self.client_namespace,
        )

    def startTestClient(
        self, test_server: XdsTestServer, **kwargs
    ) -> XdsTestClient:
        """Runs the client deployment targeting the server's xDS URI and
        waits until the client channel to the server is active."""
        test_client = self.client_runner.run(
            server_target=test_server.xds_uri, **kwargs
        )
        test_client.wait_for_active_server_channel()
        return test_client
| 6,963
| 37.054645
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/xds_url_map_test_resources.py
|
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A test framework built for urlMap related xDS test cases."""
import functools
import inspect
from typing import Any, Iterable, Mapping, Tuple
from absl import flags
from absl import logging
from framework import xds_flags
from framework import xds_k8s_flags
import framework.helpers.rand
from framework.infrastructure import gcp
from framework.infrastructure import k8s
from framework.infrastructure import traffic_director
from framework.test_app.runners.k8s import k8s_xds_client_runner
from framework.test_app.runners.k8s import k8s_xds_server_runner
flags.adopt_module_key_flags(xds_flags)
flags.adopt_module_key_flags(xds_k8s_flags)
STRATEGY = flags.DEFINE_enum(
"strategy",
default="reuse",
enum_values=["create", "keep", "reuse"],
help="Strategy of GCP resources management",
)
# Type alias
_KubernetesServerRunner = k8s_xds_server_runner.KubernetesServerRunner
_KubernetesClientRunner = k8s_xds_client_runner.KubernetesClientRunner
UrlMapType = Any
HostRule = Any
PathMatcher = Any
class _UrlMapChangeAggregator:
    """Merges every test case's urlMap customization into one composite map."""

    def __init__(self, url_map_name: str):
        # Start from an empty map that routes to the default backend service.
        self._map = {
            "name": url_map_name,
            "defaultService": GcpResourceManager().default_backend_service(),
            "hostRules": [],
            "pathMatchers": [],
        }

    def get_map(self) -> UrlMapType:
        """Returns the merged urlMap built so far."""
        return self._map

    def apply_change(self, test_case: "XdsUrlMapTestCase") -> None:
        """Lets one test case rewrite its own hostRule/pathMatcher pair."""
        logging.info(
            "Apply urlMap change for test case: %s.%s",
            test_case.short_module_name,
            test_case.__name__,
        )
        url_map_parts = test_case.url_map_change(
            *self._make_default_url_map_parts(test_case)
        )
        self._merge_url_map_parts(*url_map_parts)

    @staticmethod
    def _make_default_url_map_parts(
        test_case: "XdsUrlMapTestCase",
    ) -> Tuple[HostRule, PathMatcher]:
        # Each test case gets a host rule keyed by its hostname and a path
        # matcher keyed by its matcher name, both routing to the default
        # backend service until the test case customizes them.
        host_rule = {
            "hosts": [test_case.hostname()],
            "pathMatcher": test_case.path_matcher_name(),
        }
        path_matcher = {
            "name": test_case.path_matcher_name(),
            "defaultService": GcpResourceManager().default_backend_service(),
        }
        return host_rule, path_matcher

    def _merge_url_map_parts(
        self, host_rule: HostRule, path_matcher: PathMatcher
    ) -> None:
        self._map["hostRules"].append(host_rule)
        self._map["pathMatchers"].append(path_matcher)
def _package_flags() -> Mapping[str, Any]:
    """Automatically parse Abseil flags into a dictionary.

    Abseil flag is only available after the Abseil app initialization. If we use
    __new__ in our metaclass, the flag value parse will happen during the
    initialization of modules, hence will fail. That's why we are using __call__
    to inject metaclass magics, and the flag parsing will be delayed until the
    class is about to be instantiated.
    """
    # Collect every FlagHolder exposed by the flag modules, keyed by
    # lowercased attribute name.
    parsed = {
        key.lower(): value.value
        for flag_module in (xds_flags, xds_k8s_flags)
        for key, value in inspect.getmembers(flag_module)
        if isinstance(value, flags.FlagHolder)
    }
    parsed["strategy"] = STRATEGY.value
    return parsed
class _MetaSingletonAndAbslFlags(type):
    """Ensures singleton and injects flag values."""

    # Allow different subclasses to create different singletons.
    _instances = {}
    # But we only parse Abseil flags once.
    _flags = None

    def __call__(cls, *args, **kwargs):
        # Return the cached instance for this exact class, if any.
        existing = cls._instances.get(cls)
        if existing is not None:
            return existing
        # First instantiation of any subclass parses the flags, lazily.
        if cls._flags is None:
            cls._flags = _package_flags()
        # Inject the parsed flags as the first constructor argument.
        instance = super().__call__(cls._flags, *args, **kwargs)
        cls._instances[cls] = instance
        return instance
class GcpResourceManager(metaclass=_MetaSingletonAndAbslFlags):
"""Manages the lifecycle of GCP resources.
The GCP resources including:
- 3 K8s deployment (client, default backends, alternative backends)
- Full set of the Traffic Director stuff
- Merged gigantic urlMap from all imported test cases
All resources are intended to be used across test cases and multiple runs
(except the client K8s deployment).
"""
# This class dynamically set, so disable "no-member" check.
# pylint: disable=no-member
def __init__(self, absl_flags: Mapping[str, Any] = None):
if absl_flags is not None:
for key in absl_flags:
setattr(self, key, absl_flags[key])
# Pick a client_namespace_suffix if not set
if getattr(self, "resource_suffix", None) is None:
self.resource_suffix = ""
else:
raise NotImplementedError(
"Predefined resource_suffix is not supported for UrlMap tests"
)
logging.info(
"GcpResourceManager: resource prefix=%s, suffix=%s",
self.resource_prefix,
self.resource_suffix,
)
# Must be called before KubernetesApiManager or GcpApiManager init.
xds_flags.set_socket_default_timeout_from_flag()
# API managers
self.k8s_api_manager = k8s.KubernetesApiManager(self.kube_context)
self.gcp_api_manager = gcp.api.GcpApiManager()
self.td = traffic_director.TrafficDirectorManager(
self.gcp_api_manager,
self.project,
resource_prefix=self.resource_prefix,
resource_suffix=(self.resource_suffix or ""),
network=self.network,
compute_api_version=self.compute_api_version,
)
# Kubernetes namespace
self.k8s_namespace = k8s.KubernetesNamespace(
self.k8s_api_manager, self.resource_prefix
)
# Kubernetes Test Servers
self.test_server_runner = _KubernetesServerRunner(
self.k8s_namespace,
deployment_name=self.server_name,
image_name=self.server_image,
gcp_project=self.project,
gcp_api_manager=self.gcp_api_manager,
gcp_service_account=self.gcp_service_account,
td_bootstrap_image=self.td_bootstrap_image,
xds_server_uri=self.xds_server_uri,
network=self.network,
enable_workload_identity=self.enable_workload_identity,
)
self.test_server_alternative_runner = _KubernetesServerRunner(
self.k8s_namespace,
deployment_name=self.server_name + "-alternative",
image_name=self.server_image,
gcp_project=self.project,
gcp_api_manager=self.gcp_api_manager,
gcp_service_account=self.gcp_service_account,
td_bootstrap_image=self.td_bootstrap_image,
xds_server_uri=self.xds_server_uri,
network=self.network,
enable_workload_identity=self.enable_workload_identity,
reuse_namespace=True,
)
self.test_server_affinity_runner = _KubernetesServerRunner(
self.k8s_namespace,
deployment_name=self.server_name + "-affinity",
image_name=self.server_image,
gcp_project=self.project,
gcp_api_manager=self.gcp_api_manager,
gcp_service_account=self.gcp_service_account,
td_bootstrap_image=self.td_bootstrap_image,
xds_server_uri=self.xds_server_uri,
network=self.network,
enable_workload_identity=self.enable_workload_identity,
reuse_namespace=True,
)
logging.info("Strategy of GCP resources management: %s", self.strategy)
def create_test_client_runner(self):
if self.resource_suffix:
client_namespace_suffix = self.resource_suffix
else:
client_namespace_suffix = (
framework.helpers.rand.random_resource_suffix()
)
logging.info(
"GcpResourceManager: client_namespace_suffix=%s",
client_namespace_suffix,
)
# Kubernetes Test Client
namespace_name = _KubernetesClientRunner.make_namespace_name(
self.resource_prefix, client_namespace_suffix
)
return _KubernetesClientRunner(
k8s.KubernetesNamespace(self.k8s_api_manager, namespace_name),
deployment_name=self.client_name,
image_name=self.client_image,
gcp_project=self.project,
gcp_api_manager=self.gcp_api_manager,
gcp_service_account=self.gcp_service_account,
td_bootstrap_image=self.td_bootstrap_image,
xds_server_uri=self.xds_server_uri,
network=self.network,
debug_use_port_forwarding=self.debug_use_port_forwarding,
enable_workload_identity=self.enable_workload_identity,
stats_port=self.client_port,
)
def _pre_cleanup(self):
# Cleanup existing debris
logging.info("GcpResourceManager: pre clean-up")
self.td.cleanup(force=True)
self.test_server_runner.delete_namespace()
def setup(self, test_case_classes: Iterable["XdsUrlMapTestCase"]) -> None:
if self.strategy not in ["create", "keep"]:
logging.info(
"GcpResourceManager: skipping setup for strategy [%s]",
self.strategy,
)
return
# Clean up debris from previous runs
self._pre_cleanup()
# Start creating GCP resources
logging.info("GcpResourceManager: start setup")
# Firewall
if self.ensure_firewall:
self.td.create_firewall_rule(
allowed_ports=self.firewall_allowed_ports
)
# Health Checks
self.td.create_health_check()
# Backend Services
self.td.create_backend_service()
self.td.create_alternative_backend_service()
self.td.create_affinity_backend_service()
# Construct UrlMap from test classes
aggregator = _UrlMapChangeAggregator(
url_map_name=self.td.make_resource_name(self.td.URL_MAP_NAME)
)
for test_case_class in test_case_classes:
aggregator.apply_change(test_case_class)
final_url_map = aggregator.get_map()
# UrlMap
self.td.create_url_map_with_content(final_url_map)
# Target Proxy
self.td.create_target_proxy()
# Forwarding Rule
self.td.create_forwarding_rule(self.server_xds_port)
# Kubernetes Test Server
self.test_server_runner.run(
test_port=self.server_port,
maintenance_port=self.server_maintenance_port,
)
# Kubernetes Test Server Alternative
self.test_server_alternative_runner.run(
test_port=self.server_port,
maintenance_port=self.server_maintenance_port,
)
# Kubernetes Test Server Affinity. 3 endpoints to test that only the
# picked sub-channel is connected.
self.test_server_affinity_runner.run(
test_port=self.server_port,
maintenance_port=self.server_maintenance_port,
replica_count=3,
)
# Add backend to default backend service
neg_name, neg_zones = self.k8s_namespace.get_service_neg(
self.test_server_runner.service_name, self.server_port
)
self.td.backend_service_add_neg_backends(neg_name, neg_zones)
# Add backend to alternative backend service
neg_name_alt, neg_zones_alt = self.k8s_namespace.get_service_neg(
self.test_server_alternative_runner.service_name, self.server_port
)
self.td.alternative_backend_service_add_neg_backends(
neg_name_alt, neg_zones_alt
)
# Add backend to affinity backend service
(
neg_name_affinity,
neg_zones_affinity,
) = self.k8s_namespace.get_service_neg(
self.test_server_affinity_runner.service_name, self.server_port
)
self.td.affinity_backend_service_add_neg_backends(
neg_name_affinity, neg_zones_affinity
)
# Wait for healthy backends
self.td.wait_for_backends_healthy_status()
self.td.wait_for_alternative_backends_healthy_status()
self.td.wait_for_affinity_backends_healthy_status()
    def cleanup(self) -> None:
        """Tears down Traffic Director resources and test server runners.

        No-op for any strategy other than "create" — i.e. when this run did
        not provision the shared resources itself.
        """
        if self.strategy not in ["create"]:
            logging.info(
                "GcpResourceManager: skipping tear down for strategy [%s]",
                self.strategy,
            )
            return
        logging.info("GcpResourceManager: start tear down")
        # hasattr() guards: setup() may have failed part-way through, leaving
        # some of these attributes unset; tear down only what exists.
        if hasattr(self, "td"):
            self.td.cleanup(force=True)
        if hasattr(self, "test_server_runner"):
            self.test_server_runner.cleanup(force=True)
        # NOTE(review): the alternative/affinity runners also force-delete
        # their namespace, unlike the primary runner — presumably they own
        # dedicated namespaces; confirm against runner setup.
        if hasattr(self, "test_server_alternative_runner"):
            self.test_server_alternative_runner.cleanup(
                force=True, force_namespace=True
            )
        if hasattr(self, "test_server_affinity_runner"):
            self.test_server_affinity_runner.cleanup(
                force=True, force_namespace=True
            )
    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # the instance alive for the cache's lifetime (flake8-bugbear B019).
    # Acceptable if this manager is effectively a singleton — confirm.
    @functools.lru_cache(None)
    def default_backend_service(self) -> str:
        """Returns default backend service URL (loaded once, then cached)."""
        self.td.load_backend_service()
        return self.td.backend_service.url
    # NOTE(review): same B019 caveat as default_backend_service — lru_cache
    # on a method pins `self`; fine only if this manager is a singleton.
    @functools.lru_cache(None)
    def alternative_backend_service(self) -> str:
        """Returns alternative backend service URL (loaded once, then cached)."""
        self.td.load_alternative_backend_service()
        return self.td.alternative_backend_service.url
    # NOTE(review): same B019 caveat as default_backend_service — lru_cache
    # on a method pins `self`; fine only if this manager is a singleton.
    @functools.lru_cache(None)
    def affinity_backend_service(self) -> str:
        """Returns affinity backend service URL (loaded once, then cached)."""
        self.td.load_affinity_backend_service()
        return self.td.affinity_backend_service.url
| 14,430
| 37.380319
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/xds_k8s_flags.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import flags
# GCP
KUBE_CONTEXT = flags.DEFINE_string(
    "kube_context", default=None, help="Kubectl context to use"
)
SECONDARY_KUBE_CONTEXT = flags.DEFINE_string(
    "secondary_kube_context",
    default=None,
    help="Secondary kubectl context to use for cluster in another region",
)
GCP_SERVICE_ACCOUNT = flags.DEFINE_string(
    "gcp_service_account",
    default=None,
    help="GCP Service account for GKE workloads to impersonate",
)
TD_BOOTSTRAP_IMAGE = flags.DEFINE_string(
    "td_bootstrap_image",
    default=None,
    help="Traffic Director gRPC Bootstrap Docker image",
)
# Test app
SERVER_IMAGE = flags.DEFINE_string(
    "server_image", default=None, help="Server Docker image name"
)
SERVER_IMAGE_CANONICAL = flags.DEFINE_string(
    "server_image_canonical",
    default=None,
    help=(
        "The canonical implementation of the xDS test server.\n"
        # Fix: trailing space was missing, rendering "serverdoes not exist".
        "Can be used in tests where language-specific xDS test server "
        "does not exist, or missing a feature required for the test."
    ),
)
CLIENT_IMAGE = flags.DEFINE_string(
    "client_image", default=None, help="Client Docker image name"
)
DEBUG_USE_PORT_FORWARDING = flags.DEFINE_bool(
    "debug_use_port_forwarding",
    default=False,
    help="Development only: use kubectl port-forward to connect to test app",
)
ENABLE_WORKLOAD_IDENTITY = flags.DEFINE_bool(
    "enable_workload_identity",
    default=True,
    help="Enable the WorkloadIdentity feature",
)
# These flags have no usable default; absl fails fast at startup when any
# of them is missing.
flags.mark_flags_as_required(
    [
        "kube_context",
        "td_bootstrap_image",
        "server_image",
        "client_image",
    ]
)
| 2,192
| 29.887324
| 77
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/__init__.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 577
| 40.285714
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/xds_url_map_testcase.py
|
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A test framework built for urlMap related xDS test cases."""
import abc
from dataclasses import dataclass
import datetime
import json
import os
import re
import sys
import time
from typing import Any, Iterable, Mapping, Optional, Tuple
import unittest
from absl import flags
from absl import logging
from absl.testing import absltest
from google.protobuf import json_format
import grpc
from framework import xds_k8s_testcase
from framework import xds_url_map_test_resources
from framework.helpers import grpc as helpers_grpc
from framework.helpers import retryers
from framework.helpers import skips
from framework.infrastructure import k8s
from framework.test_app import client_app
from framework.test_app.runners.k8s import k8s_xds_client_runner
# Load existing flags
flags.adopt_module_key_flags(xds_k8s_testcase)
flags.adopt_module_key_flags(xds_url_map_test_resources)
# Define urlMap specific flags
QPS = flags.DEFINE_integer("qps", default=25, help="The QPS client is sending")
# Test configs
_URL_MAP_PROPAGATE_TIMEOUT_SEC = 600
# With the per-run IAM change, the first xDS response has a several minutes
# delay. We want to increase the interval, reduce the log spam.
_URL_MAP_PROPAGATE_CHECK_INTERVAL_SEC = 15
URL_MAP_TESTCASE_FILE_SUFFIX = "_test.py"
_CLIENT_CONFIGURE_WAIT_SEC = 2
# Type aliases
XdsTestClient = client_app.XdsTestClient
GcpResourceManager = xds_url_map_test_resources.GcpResourceManager
HostRule = xds_url_map_test_resources.HostRule
PathMatcher = xds_url_map_test_resources.PathMatcher
_KubernetesClientRunner = k8s_xds_client_runner.KubernetesClientRunner
JsonType = Any
_timedelta = datetime.timedelta
# ProtoBuf translatable RpcType enums
RpcTypeUnaryCall = "UNARY_CALL"
RpcTypeEmptyCall = "EMPTY_CALL"
def _split_camel(s: str, delimiter: str = "-") -> str:
"""Turn camel case name to snake-case-like name."""
return "".join(
delimiter + c.lower() if c.isupper() else c for c in s
).lstrip(delimiter)
class DumpedXdsConfig(dict):
    """A convenience class to check xDS config.
    Feel free to add more pre-compute fields.
    """
    def __init__(self, xds_json: JsonType): # pylint: disable=too-many-branches
        """Parses a CSDS client status JSON dump into convenience fields.

        Args:
            xds_json: JSON dict of a dumped client config (CSDS response).
        """
        super().__init__(xds_json)
        self.json_config = xds_json
        # Pre-computed views over the dump. lds/rds hold a single resource;
        # cds/eds accumulate every cluster / endpoint config found.
        self.lds = None
        self.rds = None
        self.rds_version = None
        self.cds = []
        self.eds = []
        # "ip:port" of every lbEndpoint whose healthStatus is HEALTHY.
        self.endpoints = []
        # Per-xDS-type entries under "xdsConfig".
        for xds_config in self.get("xdsConfig", []):
            try:
                if "listenerConfig" in xds_config:
                    self.lds = xds_config["listenerConfig"]["dynamicListeners"][
                        0
                    ]["activeState"]["listener"]
                elif "routeConfig" in xds_config:
                    self.rds = xds_config["routeConfig"]["dynamicRouteConfigs"][
                        0
                    ]["routeConfig"]
                    self.rds_version = xds_config["routeConfig"][
                        "dynamicRouteConfigs"
                    ][0]["versionInfo"]
                elif "clusterConfig" in xds_config:
                    for cluster in xds_config["clusterConfig"][
                        "dynamicActiveClusters"
                    ]:
                        self.cds.append(cluster["cluster"])
                elif "endpointConfig" in xds_config:
                    for endpoint in xds_config["endpointConfig"][
                        "dynamicEndpointConfigs"
                    ]:
                        self.eds.append(endpoint["endpointConfig"])
            # Broad catch is deliberate: a malformed entry should be skipped,
            # not fail the whole parse.
            # TODO(lidiz) reduce the catch to LookupError
            except Exception as e: # pylint: disable=broad-except
                logging.debug(
                    "Parsing dumped xDS config failed with %s: %s", type(e), e
                )
        # Entries under "genericXdsConfigs" are dispatched by typeUrl suffix.
        for generic_xds_config in self.get("genericXdsConfigs", []):
            try:
                if re.search(r"\.Listener$", generic_xds_config["typeUrl"]):
                    self.lds = generic_xds_config["xdsConfig"]
                elif re.search(
                    r"\.RouteConfiguration$", generic_xds_config["typeUrl"]
                ):
                    self.rds = generic_xds_config["xdsConfig"]
                    self.rds_version = generic_xds_config["versionInfo"]
                elif re.search(r"\.Cluster$", generic_xds_config["typeUrl"]):
                    self.cds.append(generic_xds_config["xdsConfig"])
                elif re.search(
                    r"\.ClusterLoadAssignment$", generic_xds_config["typeUrl"]
                ):
                    self.eds.append(generic_xds_config["xdsConfig"])
            # TODO(lidiz) reduce the catch to LookupError
            except Exception as e: # pylint: disable=broad-except
                logging.debug(
                    "Parsing dumped xDS config failed with %s: %s", type(e), e
                )
        # Flatten healthy endpoints out of the collected EDS configs.
        for endpoint_config in self.eds:
            for endpoint in endpoint_config.get("endpoints", {}):
                for lb_endpoint in endpoint.get("lbEndpoints", {}):
                    try:
                        if lb_endpoint["healthStatus"] == "HEALTHY":
                            self.endpoints.append(
                                "%s:%s"
                                % (
                                    lb_endpoint["endpoint"]["address"][
                                        "socketAddress"
                                    ]["address"],
                                    lb_endpoint["endpoint"]["address"][
                                        "socketAddress"
                                    ]["portValue"],
                                )
                            )
                    # TODO(lidiz) reduce the catch to LookupError
                    except Exception as e: # pylint: disable=broad-except
                        logging.debug(
                            "Parse endpoint failed with %s: %s", type(e), e
                        )
    def __str__(self) -> str:
        """Pretty-prints the whole dump as indented JSON."""
        return json.dumps(self, indent=2)
class RpcDistributionStats:
    """A convenience class to check RPC distribution.

    Pre-computes per-method / per-service RPC counts from a
    LoadBalancerStatsResponse JSON dump. Peers whose name contains
    "alternative" are attributed to the alternative service; all other
    peers to the default service. Feel free to add more pre-compute fields.
    """
    num_failures: int
    num_oks: int
    default_service_rpc_count: int
    alternative_service_rpc_count: int
    unary_call_default_service_rpc_count: int
    empty_call_default_service_rpc_count: int
    unary_call_alternative_service_rpc_count: int
    empty_call_alternative_service_rpc_count: int
    def __init__(self, json_lb_stats: JsonType):
        """Args:
            json_lb_stats: JSON dict of a LoadBalancerStatsResponse.
        """
        self.num_failures = json_lb_stats.get("numFailures", 0)
        self.num_peers = 0
        self.num_oks = 0
        self.default_service_rpc_count = 0
        self.alternative_service_rpc_count = 0
        self.unary_call_default_service_rpc_count = 0
        self.empty_call_default_service_rpc_count = 0
        self.unary_call_alternative_service_rpc_count = 0
        self.empty_call_alternative_service_rpc_count = 0
        # Keep the raw dump around for ad-hoc inspection.
        self.raw = json_lb_stats
        if "rpcsByPeer" in json_lb_stats:
            self.num_peers = len(json_lb_stats["rpcsByPeer"])
        per_method = json_lb_stats.get("rpcsByMethod", {})
        for rpc_type, method_stats in per_method.items():
            for peer, count in method_stats["rpcsByPeer"].items():
                self.num_oks += count
                # Per-method-per-service fields use plain assignment (the
                # last matching peer wins), while the per-service totals
                # accumulate across peers — mirroring the original behavior.
                if rpc_type == "UnaryCall":
                    if "alternative" in peer:
                        self.unary_call_alternative_service_rpc_count = count
                        self.alternative_service_rpc_count += count
                    else:
                        self.unary_call_default_service_rpc_count = count
                        self.default_service_rpc_count += count
                else:
                    if "alternative" in peer:
                        self.empty_call_alternative_service_rpc_count = count
                        self.alternative_service_rpc_count += count
                    else:
                        self.empty_call_default_service_rpc_count = count
                        self.default_service_rpc_count += count
@dataclass
class ExpectedResult:
    """Describes the expected result of assertRpcStatusCode method below."""
    # RPC method whose results are checked, e.g. "UNARY_CALL".
    rpc_type: str = RpcTypeUnaryCall
    # gRPC status code the RPCs are expected to finish with.
    status_code: grpc.StatusCode = grpc.StatusCode.OK
    # Expected fraction (0..1) of the method's RPCs finishing with status_code.
    ratio: float = 1
class _MetaXdsUrlMapTestCase(type):
    """Tracking test case subclasses.

    Every class created with this metaclass whose name starts with "Test"
    is registered, so the shared GCP resources can be set up before the
    first test case runs and torn down after the last one finishes.
    """
    # Automatic discover of all subclasses
    _test_case_classes = []
    _test_case_names = set()
    # Keep track of started and finished test cases, so we know when to setup
    # and tear down GCP resources.
    _started_test_cases = set()
    _finished_test_cases = set()
    def __new__(
        cls, name: str, bases: Iterable[Any], attrs: Mapping[str, Any]
    ) -> Any:
        # Hand over the tracking objects: the metaclass-level registries are
        # exposed as class attributes on every created test case class.
        attrs["test_case_classes"] = cls._test_case_classes
        attrs["test_case_names"] = cls._test_case_names
        attrs["started_test_cases"] = cls._started_test_cases
        attrs["finished_test_cases"] = cls._finished_test_cases
        # Handle the test name reflection: derive "short-module-name" from
        # the defining module's file name, minus the "_test.py" suffix.
        module_name = os.path.split(sys.modules[attrs["__module__"]].__file__)[
            -1
        ]
        if module_name.endswith(URL_MAP_TESTCASE_FILE_SUFFIX):
            module_name = module_name.replace(URL_MAP_TESTCASE_FILE_SUFFIX, "")
        attrs["short_module_name"] = module_name.replace("_", "-")
        # Create the class and track it; only "Test*" classes are registered
        # (base/abstract classes are skipped).
        new_class = type.__new__(cls, name, bases, attrs)
        if name.startswith("Test"):
            cls._test_case_names.add(name)
            cls._test_case_classes.append(new_class)
        else:
            logging.debug("Skipping test case class: %s", name)
        return new_class
class XdsUrlMapTestCase(absltest.TestCase, metaclass=_MetaXdsUrlMapTestCase):
    """XdsUrlMapTestCase is the base class for urlMap related tests.
    The subclass is expected to implement 3 methods:
    - url_map_change: Updates the urlMap components for this test case
    - xds_config_validate: Validates if the client received legit xDS configs
    - rpc_distribution_validate: Validates if the routing behavior is correct
    """
    # Per-test-case client runner; created in setUpClass, may stay None if
    # setup fails before the runner is created.
    test_client_runner: Optional[_KubernetesClientRunner] = None
    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        """Allow the test case to decide whether it supports the given config.
        Returns:
            A bool indicates if the given config is supported.
        """
        del config
        return True
    @staticmethod
    def client_init_config(rpc: str, metadata: str) -> Tuple[str, str]:
        """Updates the initial RPC configs for this test case.
        Each test case will start a test client. The client takes RPC configs
        and starts to send RPCs immediately. The config returned by this
        function will be used to replace the default configs.
        The default configs are passed in as arguments, so this method can
        modify part of them.
        Args:
            rpc: The default rpc config, specifying RPCs to send, format
            'UnaryCall,EmptyCall'
            metadata: The metadata config, specifying metadata to send with each
            RPC, format 'EmptyCall:key1:value1,UnaryCall:key2:value2'.
        Returns:
            A tuple contains the updated rpc and metadata config.
        """
        return rpc, metadata
    @staticmethod
    @abc.abstractmethod
    def url_map_change(
        host_rule: HostRule, path_matcher: PathMatcher
    ) -> Tuple[HostRule, PathMatcher]:
        """Updates the dedicated urlMap components for this test case.
        Each test case will have a dedicated HostRule, where the hostname is
        generated from the test case name. The HostRule will be linked to a
        PathMatcher, where stores the routing logic.
        Args:
            host_rule: A HostRule GCP resource as a JSON dict.
            path_matcher: A PathMatcher GCP resource as a JSON dict.
        Returns:
            A tuple contains the updated version of given HostRule and
            PathMatcher.
        """
    @abc.abstractmethod
    def xds_config_validate(self, xds_config: DumpedXdsConfig) -> None:
        """Validates received xDS config, if anything is wrong, raise.
        This stage only ends when the control plane failed to send a valid
        config within a given time range, like 600s.
        Args:
            xds_config: A DumpedXdsConfig instance can be used as a JSON dict,
            but also provides helper fields for commonly checked xDS config.
        """
    @abc.abstractmethod
    def rpc_distribution_validate(self, test_client: XdsTestClient) -> None:
        """Validates the routing behavior, if any is wrong, raise.
        Args:
            test_client: A XdsTestClient instance for all sorts of end2end testing.
        """
    @classmethod
    def hostname(cls):
        # Unique per-test-case xDS hostname, e.g.
        # "<short-module-name>.<split-camel-class-name>:<port>". Used both as
        # the client's xds:/// target and as the HostRule host.
        return "%s.%s:%s" % (
            cls.short_module_name,
            _split_camel(cls.__name__),
            GcpResourceManager().server_xds_port,
        )
    @classmethod
    def path_matcher_name(cls):
        # Path matcher name must match r'(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)'
        return "%s-%s-pm" % (cls.short_module_name, _split_camel(cls.__name__))
    @classmethod
    def setUpClass(cls):
        """Provisions shared GCP resources (first test case only) and starts
        this test case's own xDS test client."""
        logging.info("----- Testing %s -----", cls.__name__)
        logging.info("Logs timezone: %s", time.localtime().tm_zone)
        # Raises unittest.SkipTest if given client/server/version does not
        # support current test case.
        skips.evaluate_test_config(cls.is_supported)
        # Configure cleanup to run after all tests regardless of
        # whether setUpClass failed.
        cls.addClassCleanup(cls.cleanupAfterTests)
        if not cls.started_test_cases:
            # Create the GCP resource once before the first test start
            GcpResourceManager().setup(cls.test_case_classes)
        cls.started_test_cases.add(cls.__name__)
        # Create the test case's own client runner with it's own namespace,
        # enables concurrent running with other test cases.
        cls.test_client_runner = (
            GcpResourceManager().create_test_client_runner()
        )
        # Start the client, and allow the test to override the initial RPC config.
        rpc, metadata = cls.client_init_config(
            rpc="UnaryCall,EmptyCall", metadata=""
        )
        cls.test_client = cls.test_client_runner.run(
            server_target=f"xds:///{cls.hostname()}",
            rpc=rpc,
            metadata=metadata,
            qps=QPS.value,
            print_response=True,
        )
    @classmethod
    def cleanupAfterTests(cls):
        """Tears down this test case's client (and, after the last test case,
        the shared infrastructure), then fails on unexpected client restarts."""
        logging.info("----- TestCase %s teardown -----", cls.__name__)
        client_restarts: int = 0
        if cls.test_client_runner:
            try:
                logging.debug("Getting pods restart times")
                client_restarts = cls.test_client_runner.get_pod_restarts(
                    cls.test_client_runner.deployment
                )
            except (retryers.RetryError, k8s.NotFound) as e:
                # Best-effort: a failed restart count shouldn't block cleanup.
                logging.exception(e)
        cls.finished_test_cases.add(cls.__name__)
        # Whether to clean up shared pre-provisioned infrastructure too.
        # We only do it after all tests are finished.
        cleanup_all = cls.finished_test_cases == cls.test_case_names
        # Graceful cleanup: try three times, and don't fail the test on
        # a cleanup failure.
        retryer = retryers.constant_retryer(
            wait_fixed=_timedelta(seconds=10),
            attempts=3,
            log_level=logging.INFO,
        )
        try:
            retryer(cls._cleanup, cleanup_all)
        except retryers.RetryError:
            logging.exception("Got error during teardown")
        finally:
            if hasattr(cls, "test_client_runner") and cls.test_client_runner:
                logging.info("----- Test client logs -----")
                cls.test_client_runner.logs_explorer_run_history_links()
            # Fail if any of the pods restarted.
            error_msg = (
                "Client pods unexpectedly restarted"
                f" {client_restarts} times during test."
                " In most cases, this is caused by the test client app crash."
            )
            assert client_restarts == 0, error_msg
    @classmethod
    def _cleanup(cls, cleanup_all: bool = False):
        # Single cleanup attempt; retried by cleanupAfterTests' retryer.
        if cls.test_client_runner:
            cls.test_client_runner.cleanup(force=True, force_namespace=True)
        if cleanup_all:
            GcpResourceManager().cleanup()
    def _fetch_and_check_xds_config(self):
        """Fetches the client's CSDS dump and runs the subclass validation.

        Raises on any mismatch; retried by test_client_config until the
        config propagates or the overall timeout is reached.
        """
        # TODO(lidiz) find another way to store last seen xDS config
        # Cleanup state for this attempt
        # pylint: disable=attribute-defined-outside-init
        self._xds_json_config = None
        # Fetch client config
        config = self.test_client.csds.fetch_client_status(
            log_level=logging.INFO
        )
        self.assertIsNotNone(config)
        # Found client config, test it.
        self._xds_json_config = json_format.MessageToDict(config)
        # pylint: enable=attribute-defined-outside-init
        # Execute the child class provided validation logic
        self.xds_config_validate(DumpedXdsConfig(self._xds_json_config))
    def run(self, result: Optional[unittest.TestResult] = None) -> None:
        """Abort this test case if CSDS check is failed.
        This prevents the test runner to waste time on RPC distribution test,
        and yields clearer signal.
        """
        # NOTE(review): `result` is dereferenced unconditionally, so a bare
        # .run() call without a result would raise; absltest always passes
        # one — confirm before reusing this class outside absltest.
        if result.failures or result.errors:
            logging.info("Aborting %s", self.__class__.__name__)
        else:
            super().run(result)
    def test_client_config(self):
        """Polls CSDS until the subclass's xds_config_validate passes."""
        retryer = retryers.constant_retryer(
            wait_fixed=datetime.timedelta(
                seconds=_URL_MAP_PROPAGATE_CHECK_INTERVAL_SEC
            ),
            timeout=datetime.timedelta(seconds=_URL_MAP_PROPAGATE_TIMEOUT_SEC),
            logger=logging,
            log_level=logging.INFO,
        )
        try:
            retryer(self._fetch_and_check_xds_config)
        finally:
            # Always log the last seen config, even on timeout.
            # NOTE(review): _xds_json_config may still be None if the very
            # first fetch failed — confirm resource_pretty_format handles it.
            logging.info(
                "latest xDS config:\n%s",
                GcpResourceManager().td.compute.resource_pretty_format(
                    self._xds_json_config
                ),
            )
    def test_rpc_distribution(self):
        """Runs the subclass's routing-behavior validation."""
        self.rpc_distribution_validate(self.test_client)
    @classmethod
    def configure_and_send(
        cls,
        test_client: XdsTestClient,
        *,
        rpc_types: Iterable[str],
        metadata: Optional[Iterable[Tuple[str, str, str]]] = None,
        app_timeout: Optional[int] = None,
        num_rpcs: int,
    ) -> RpcDistributionStats:
        """Reconfigures the test client, then collects RPC distribution stats.

        Args:
            test_client: The client to reconfigure and query.
            rpc_types: RPC methods to send, e.g. ("UNARY_CALL",).
            metadata: Optional (rpc_type, key, value) metadata triples.
            app_timeout: Optional per-RPC application timeout.
            num_rpcs: Number of RPCs to account for in the stats response.
        Returns:
            Pre-computed RpcDistributionStats for the collected window.
        """
        test_client.update_config.configure(
            rpc_types=rpc_types, metadata=metadata, app_timeout=app_timeout
        )
        # Configure RPC might race with get stats RPC on slower machines.
        time.sleep(_CLIENT_CONFIGURE_WAIT_SEC)
        lb_stats = test_client.get_load_balancer_stats(num_rpcs=num_rpcs)
        logging.info(
            "[%s] << Received LoadBalancerStatsResponse:\n%s",
            test_client.hostname,
            helpers_grpc.lb_stats_pretty(lb_stats),
        )
        return RpcDistributionStats(json_format.MessageToDict(lb_stats))
    def assertNumEndpoints(self, xds_config: DumpedXdsConfig, k: int) -> None:
        """Asserts the dumped EDS config contains exactly k healthy endpoints."""
        self.assertLen(
            xds_config.endpoints,
            k,
            (
                "insufficient endpoints in EDS:"
                f" want={k} seen={xds_config.endpoints}"
            ),
        )
    def assertRpcStatusCode( # pylint: disable=too-many-locals
        self,
        test_client: XdsTestClient,
        *,
        expected: Iterable[ExpectedResult],
        length: int,
        tolerance: float,
    ) -> None:
        """Assert the distribution of RPC statuses over a period of time."""
        # Sending with pre-set QPS for a period of time
        before_stats = test_client.get_load_balancer_accumulated_stats()
        logging.info(
            (
                "Received LoadBalancerAccumulatedStatsResponse from test client"
                " %s: before:\n%s"
            ),
            test_client.hostname,
            helpers_grpc.accumulated_stats_pretty(before_stats),
        )
        time.sleep(length)
        after_stats = test_client.get_load_balancer_accumulated_stats()
        logging.info(
            (
                "Received LoadBalancerAccumulatedStatsResponse from test client"
                " %s: after: \n%s"
            ),
            test_client.hostname,
            helpers_grpc.accumulated_stats_pretty(after_stats),
        )
        # Validate the diff
        for expected_result in expected:
            rpc = expected_result.rpc_type
            # StatusCode.value is a (code, description) tuple; [0] is the int.
            status = expected_result.status_code.value[0]
            # Compute observation
            # ProtoBuf messages has special magic dictionary that we don't need
            # to catch exceptions:
            # https://developers.google.com/protocol-buffers/docs/reference/python-generated#undefined
            seen_after = after_stats.stats_per_method[rpc].result[status]
            seen_before = before_stats.stats_per_method[rpc].result[status]
            seen = seen_after - seen_before
        # Compute total number of RPC started
            stats_per_method_after = after_stats.stats_per_method.get(
                rpc, {}
            ).result.items()
            total_after = sum(
                x[1] for x in stats_per_method_after
            ) # (status_code, count)
            stats_per_method_before = before_stats.stats_per_method.get(
                rpc, {}
            ).result.items()
            total_before = sum(
                x[1] for x in stats_per_method_before
            ) # (status_code, count)
            total = total_after - total_before
            # Compute and validate the number
            # NOTE(review): total == 0 (no RPCs of this method in the window)
            # would raise ZeroDivisionError here — confirm callers always
            # exercise the method under test.
            want = total * expected_result.ratio
            diff_ratio = abs(seen - want) / total
            self.assertLessEqual(
                diff_ratio,
                tolerance,
                (
                    f"Expect rpc [{rpc}] to return "
                    f"[{expected_result.status_code}] at "
                    f"{expected_result.ratio:.2f} ratio: "
                    f"seen={seen} want={want} total={total} "
                    f"diff_ratio={diff_ratio:.4f} > {tolerance:.2f}"
                ),
            )
| 23,557
| 38.067993
| 102
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/xds_flags.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
from absl import flags
from framework.helpers import highlighter
# GCP
PROJECT = flags.DEFINE_string(
"project", default=None, help="(required) GCP Project ID."
)
RESOURCE_PREFIX = flags.DEFINE_string(
"resource_prefix",
default=None,
help=(
"(required) The prefix used to name GCP resources.\n"
"Together with `resource_suffix` used to create unique "
"resource names."
),
)
RESOURCE_SUFFIX = flags.DEFINE_string(
"resource_suffix",
default=None,
help=(
"The suffix used to name GCP resources.\n"
"Together with `resource_prefix` used to create unique "
"resource names.\n"
"(default: test suite will generate a random suffix, based on suite "
"resource management preferences)"
),
)
NETWORK = flags.DEFINE_string(
"network", default="default", help="GCP Network ID"
)
COMPUTE_API_VERSION = flags.DEFINE_string(
"compute_api_version",
default="v1",
help="The version of the GCP Compute API, e.g., v1, v1alpha",
)
# Mirrors --xds-server-uri argument of Traffic Director gRPC Bootstrap
XDS_SERVER_URI = flags.DEFINE_string(
"xds_server_uri", default=None, help="Override Traffic Director server URI."
)
ENSURE_FIREWALL = flags.DEFINE_bool(
"ensure_firewall",
default=False,
help="Ensure the allow-health-check firewall exists before each test case",
)
FIREWALL_SOURCE_RANGE = flags.DEFINE_list(
"firewall_source_range",
default=["35.191.0.0/16", "130.211.0.0/22"],
help="Update the source range of the firewall rule.",
)
FIREWALL_ALLOWED_PORTS = flags.DEFINE_list(
"firewall_allowed_ports",
default=["8080-8100"],
help="Update the allowed ports of the firewall rule.",
)
# Test server
SERVER_NAME = flags.DEFINE_string(
"server_name",
default="psm-grpc-server",
help="The name to use for test server deployments.",
)
SERVER_PORT = flags.DEFINE_integer(
"server_port",
default=8080,
lower_bound=1,
upper_bound=65535,
help="Server test port.\nMust be within --firewall_allowed_ports.",
)
SERVER_MAINTENANCE_PORT = flags.DEFINE_integer(
"server_maintenance_port",
default=None,
lower_bound=1,
upper_bound=65535,
help=(
"Server port running maintenance services: Channelz, CSDS, Health, "
"XdsUpdateHealth, and ProtoReflection (optional).\n"
"Must be within --firewall_allowed_ports.\n"
"(default: the port is chosen automatically based on "
"the security configuration)"
),
)
SERVER_XDS_HOST = flags.DEFINE_string(
"server_xds_host",
default="xds-test-server",
help=(
"The xDS hostname of the test server.\n"
"Together with `server_xds_port` makes test server target URI, "
"xds:///hostname:port"
),
)
# Note: port 0 known to represent a request for dynamically-allocated port
# https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers#Well-known_ports
SERVER_XDS_PORT = flags.DEFINE_integer(
"server_xds_port",
default=8080,
lower_bound=0,
upper_bound=65535,
help=(
"The xDS port of the test server.\n"
"Together with `server_xds_host` makes test server target URI, "
"xds:///hostname:port\n"
"Must be unique within a GCP project.\n"
"Set to 0 to select any unused port."
),
)
# Test client
CLIENT_NAME = flags.DEFINE_string(
"client_name",
default="psm-grpc-client",
help="The name to use for test client deployments",
)
CLIENT_PORT = flags.DEFINE_integer(
"client_port",
default=8079,
lower_bound=1,
upper_bound=65535,
help=(
"The port test client uses to run gRPC services: Channelz, CSDS, "
"XdsStats, XdsUpdateClientConfigure, and ProtoReflection (optional).\n"
"Doesn't have to be within --firewall_allowed_ports."
),
)
# Testing metadata
TESTING_VERSION = flags.DEFINE_string(
"testing_version",
default=None,
help="The testing gRPC version branch name. Like master, dev, v1.55.x",
)
FORCE_CLEANUP = flags.DEFINE_bool(
"force_cleanup",
default=False,
help="Force resource cleanup, even if not created by this test run",
)
COLLECT_APP_LOGS = flags.DEFINE_bool(
"collect_app_logs",
default=False,
help=(
f"Collect the logs of the xDS Test Client and Server\n"
f"into the test_app_logs/ directory under the log directory.\n"
f"See --log_dir description for configuring the log directory."
),
)
# Needed to configure urllib3 socket timeout, which is infinity by default.
SOCKET_DEFAULT_TIMEOUT = flags.DEFINE_float(
"socket_default_timeout",
default=60,
lower_bound=0,
help=(
"Set the default timeout in seconds on blocking socket operations.\n"
"If zero is given, the new sockets have no timeout. "
),
)
def set_socket_default_timeout_from_flag() -> None:
    """A helper to configure default socket timeout from a flag.

    This is known to affect the following pip packages:
    - google-api-python-client: has the default timeout set to 60:
      https://googleapis.github.io/google-api-python-client/docs/epy/googleapiclient.http-module.html#build_http
    - kubernetes: falls back to urllib3 timeout, which is infinity by default:
      https://urllib3.readthedocs.io/en/stable/reference/urllib3.util.html#urllib3.util.Timeout

    NOTE: Must be called _after_ the flags were parsed by absl, but before
    the before KubernetesApiManager or GcpApiManager initialized.
    """
    flag_value: float = SOCKET_DEFAULT_TIMEOUT.value
    # The flag encodes "no timeout" (infinity) as 0; setdefaulttimeout
    # expects None for that case.
    if flag_value == 0:
        socket.setdefaulttimeout(None)
    else:
        socket.setdefaulttimeout(flag_value)
flags.adopt_module_key_flags(highlighter)
flags.mark_flags_as_required(
[
"project",
"resource_prefix",
]
)
| 6,438
| 30.876238
| 114
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/xds_k8s_testcase.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import contextlib
import datetime
import enum
import hashlib
import logging
import re
import signal
import time
from types import FrameType
from typing import Any, Callable, List, Optional, Tuple, Union
from absl import flags
from absl.testing import absltest
from google.protobuf import json_format
import grpc
from framework import xds_flags
from framework import xds_k8s_flags
from framework import xds_url_map_testcase
from framework.helpers import grpc as helpers_grpc
from framework.helpers import rand as helpers_rand
from framework.helpers import retryers
from framework.helpers import skips
import framework.helpers.highlighter
from framework.infrastructure import gcp
from framework.infrastructure import k8s
from framework.infrastructure import traffic_director
from framework.rpc import grpc_channelz
from framework.rpc import grpc_csds
from framework.rpc import grpc_testing
from framework.test_app import client_app
from framework.test_app import server_app
from framework.test_app.runners.k8s import k8s_xds_client_runner
from framework.test_app.runners.k8s import k8s_xds_server_runner
logger = logging.getLogger(__name__)
# TODO(yashkt): We will no longer need this flag once Core exposes local certs
# from channelz
_CHECK_LOCAL_CERTS = flags.DEFINE_bool(
"check_local_certs",
default=True,
help="Security Tests also check the value of local certs",
)
flags.adopt_module_key_flags(xds_flags)
flags.adopt_module_key_flags(xds_k8s_flags)
# Type aliases
TrafficDirectorManager = traffic_director.TrafficDirectorManager
TrafficDirectorAppNetManager = traffic_director.TrafficDirectorAppNetManager
TrafficDirectorSecureManager = traffic_director.TrafficDirectorSecureManager
XdsTestServer = server_app.XdsTestServer
XdsTestClient = client_app.XdsTestClient
KubernetesServerRunner = k8s_xds_server_runner.KubernetesServerRunner
KubernetesClientRunner = k8s_xds_client_runner.KubernetesClientRunner
_LoadBalancerStatsResponse = grpc_testing.LoadBalancerStatsResponse
_LoadBalancerAccumulatedStatsResponse = (
grpc_testing.LoadBalancerAccumulatedStatsResponse
)
_ChannelState = grpc_channelz.ChannelState
_timedelta = datetime.timedelta
ClientConfig = grpc_csds.ClientConfig
# pylint complains about signal.Signals for some reason.
_SignalNum = Union[int, signal.Signals] # pylint: disable=no-member
_SignalHandler = Callable[[_SignalNum, Optional[FrameType]], Any]
_TD_CONFIG_MAX_WAIT_SEC = 600
class TdPropagationRetryableError(Exception):
    """Indicates that TD config hasn't propagated yet, and it's safe to retry.

    Raised while polling for Traffic Director configuration to reach the
    test client, so retry helpers can distinguish "not yet" from real errors.
    """
class XdsKubernetesBaseTestCase(absltest.TestCase):
    """Base class for xDS test cases against test apps running on Kubernetes.

    Reads the shared absl flags in setUpClass, owns the GCP/Kubernetes API
    managers, and provides the common RPC- and xDS-config assertion helpers
    used by the concrete test case classes.
    """

    lang_spec: skips.TestConfig
    client_namespace: str
    client_runner: KubernetesClientRunner
    ensure_firewall: bool
    force_cleanup: bool
    gcp_api_manager: gcp.api.GcpApiManager
    gcp_service_account: Optional[str]
    k8s_api_manager: k8s.KubernetesApiManager
    secondary_k8s_api_manager: k8s.KubernetesApiManager
    network: str
    project: str
    resource_prefix: str
    resource_suffix: str = ""
    # Whether to randomize resources names for each test by appending a
    # unique suffix.
    resource_suffix_randomize: bool = True
    server_maintenance_port: Optional[int]
    server_namespace: str
    server_runner: KubernetesServerRunner
    server_xds_host: str
    server_xds_port: int
    td: TrafficDirectorManager
    td_bootstrap_image: str
    # SIGINT bookkeeping: previous handler is restored after forced cleanup;
    # _handling_sigint suppresses duplicate subTest logging during cleanup.
    _prev_sigint_handler: Optional[_SignalHandler] = None
    _handling_sigint: bool = False
    yaml_highlighter: framework.helpers.highlighter.HighlighterYaml = None

    @staticmethod
    def is_supported(config: skips.TestConfig) -> bool:
        """Overridden by the test class to decide if the config is supported.

        Returns:
            A bool indicates if the given config is supported.
        """
        del config
        return True

    @classmethod
    def setUpClass(cls):
        """Hook method for setting up class fixture before running tests in
        the class.
        """
        logger.info("----- Testing %s -----", cls.__name__)
        logger.info("Logs timezone: %s", time.localtime().tm_zone)

        # Raises unittest.SkipTest if given client/server/version does not
        # support current test case.
        cls.lang_spec = skips.evaluate_test_config(cls.is_supported)

        # Must be called before KubernetesApiManager or GcpApiManager init.
        xds_flags.set_socket_default_timeout_from_flag()

        # GCP
        cls.project = xds_flags.PROJECT.value
        cls.network = xds_flags.NETWORK.value
        cls.gcp_service_account = xds_k8s_flags.GCP_SERVICE_ACCOUNT.value
        cls.td_bootstrap_image = xds_k8s_flags.TD_BOOTSTRAP_IMAGE.value
        cls.xds_server_uri = xds_flags.XDS_SERVER_URI.value
        cls.ensure_firewall = xds_flags.ENSURE_FIREWALL.value
        cls.firewall_allowed_ports = xds_flags.FIREWALL_ALLOWED_PORTS.value
        cls.compute_api_version = xds_flags.COMPUTE_API_VERSION.value

        # Resource names.
        cls.resource_prefix = xds_flags.RESOURCE_PREFIX.value
        if xds_flags.RESOURCE_SUFFIX.value is not None:
            # An explicit suffix disables per-test randomization.
            cls.resource_suffix_randomize = False
            cls.resource_suffix = xds_flags.RESOURCE_SUFFIX.value

        # Test server
        cls.server_image = xds_k8s_flags.SERVER_IMAGE.value
        cls.server_name = xds_flags.SERVER_NAME.value
        cls.server_port = xds_flags.SERVER_PORT.value
        cls.server_maintenance_port = xds_flags.SERVER_MAINTENANCE_PORT.value
        cls.server_xds_host = xds_flags.SERVER_NAME.value
        cls.server_xds_port = xds_flags.SERVER_XDS_PORT.value

        # Test client
        cls.client_image = xds_k8s_flags.CLIENT_IMAGE.value
        cls.client_name = xds_flags.CLIENT_NAME.value
        cls.client_port = xds_flags.CLIENT_PORT.value

        # Test suite settings
        cls.force_cleanup = xds_flags.FORCE_CLEANUP.value
        cls.debug_use_port_forwarding = (
            xds_k8s_flags.DEBUG_USE_PORT_FORWARDING.value
        )
        cls.enable_workload_identity = (
            xds_k8s_flags.ENABLE_WORKLOAD_IDENTITY.value
        )
        cls.check_local_certs = _CHECK_LOCAL_CERTS.value

        # Resource managers
        cls.k8s_api_manager = k8s.KubernetesApiManager(
            xds_k8s_flags.KUBE_CONTEXT.value
        )
        cls.secondary_k8s_api_manager = k8s.KubernetesApiManager(
            xds_k8s_flags.SECONDARY_KUBE_CONTEXT.value
        )
        cls.gcp_api_manager = gcp.api.GcpApiManager()

        # Other
        cls.yaml_highlighter = framework.helpers.highlighter.HighlighterYaml()

    @classmethod
    def _pretty_accumulated_stats(
        cls,
        accumulated_stats: _LoadBalancerAccumulatedStatsResponse,
        *,
        ignore_empty: bool = False,
        highlight: bool = True,
    ) -> str:
        """Formats accumulated stats as YAML, optionally syntax-highlighted."""
        stats_yaml = helpers_grpc.accumulated_stats_pretty(
            accumulated_stats, ignore_empty=ignore_empty
        )
        if not highlight:
            return stats_yaml
        return cls.yaml_highlighter.highlight(stats_yaml)

    @classmethod
    def _pretty_lb_stats(cls, lb_stats: _LoadBalancerStatsResponse) -> str:
        """Formats LB stats as syntax-highlighted YAML."""
        stats_yaml = helpers_grpc.lb_stats_pretty(lb_stats)
        return cls.yaml_highlighter.highlight(stats_yaml)

    @classmethod
    def tearDownClass(cls):
        """Closes all API managers opened in setUpClass."""
        cls.k8s_api_manager.close()
        cls.secondary_k8s_api_manager.close()
        cls.gcp_api_manager.close()

    def setUp(self):
        # Install a SIGINT handler so Ctrl+C triggers resource cleanup.
        self._prev_sigint_handler = signal.signal(
            signal.SIGINT, self.handle_sigint
        )

    def handle_sigint(
        self, signalnum: _SignalNum, frame: Optional[FrameType]
    ) -> None:
        """Force-cleans up test resources on Ctrl+C, then re-raises."""
        logger.info("Caught Ctrl+C, cleaning up...")
        self._handling_sigint = True
        # Force resource cleanup by their name. Addresses the case where ctrl-c
        # is pressed while waiting for the resource creation.
        self.force_cleanup = True
        self.tearDown()
        self.tearDownClass()
        self._handling_sigint = False
        if self._prev_sigint_handler is not None:
            signal.signal(signal.SIGINT, self._prev_sigint_handler)
        raise KeyboardInterrupt

    @contextlib.contextmanager
    def subTest(self, msg, **params):  # noqa pylint: disable=signature-differs
        # Wraps unittest's subTest with start/finish log lines.
        logger.info("--- Starting subTest %s.%s ---", self.id(), msg)
        try:
            yield super().subTest(msg, **params)
        finally:
            if not self._handling_sigint:
                logger.info("--- Finished subTest %s.%s ---", self.id(), msg)

    def setupTrafficDirectorGrpc(self):
        """Configures TD for gRPC with the xDS host/port and health checks."""
        self.td.setup_for_grpc(
            self.server_xds_host,
            self.server_xds_port,
            health_check_port=self.server_maintenance_port,
        )

    def setupServerBackends(
        self,
        *,
        wait_for_healthy_status=True,
        server_runner=None,
        max_rate_per_endpoint: Optional[int] = None,
    ):
        """Discovers the server's NEG and adds it to the TD backend service."""
        if server_runner is None:
            server_runner = self.server_runner
        # Load Backends
        neg_name, neg_zones = server_runner.k8s_namespace.get_service_neg(
            server_runner.service_name, self.server_port
        )

        # Add backends to the Backend Service
        self.td.backend_service_add_neg_backends(
            neg_name, neg_zones, max_rate_per_endpoint=max_rate_per_endpoint
        )
        if wait_for_healthy_status:
            self.td.wait_for_backends_healthy_status()

    def removeServerBackends(self, *, server_runner=None):
        """Removes the server's NEG backends from the TD backend service."""
        if server_runner is None:
            server_runner = self.server_runner
        # Load Backends
        neg_name, neg_zones = server_runner.k8s_namespace.get_service_neg(
            server_runner.service_name, self.server_port
        )

        # Remove backends from the Backend Service
        self.td.backend_service_remove_neg_backends(neg_name, neg_zones)

    def assertSuccessfulRpcs(
        self, test_client: XdsTestClient, num_rpcs: int = 100
    ):
        """Sends num_rpcs and asserts zero failures and coverage of backends."""
        lb_stats = self.getClientRpcStats(test_client, num_rpcs)
        self.assertAllBackendsReceivedRpcs(lb_stats)
        failed = int(lb_stats.num_failures)
        self.assertLessEqual(
            failed,
            0,
            msg=f"Expected all RPCs to succeed: {failed} of {num_rpcs} failed",
        )

    @staticmethod
    def diffAccumulatedStatsPerMethod(
        before: _LoadBalancerAccumulatedStatsResponse,
        after: _LoadBalancerAccumulatedStatsResponse,
    ) -> _LoadBalancerAccumulatedStatsResponse:
        """Only diffs stats_per_method, as the other fields are deprecated."""
        diff = _LoadBalancerAccumulatedStatsResponse()
        for method, method_stats in after.stats_per_method.items():
            for status, count in method_stats.result.items():
                count -= before.stats_per_method[method].result[status]
                if count < 0:
                    raise AssertionError("Diff of count shouldn't be negative")
                if count > 0:
                    diff.stats_per_method[method].result[status] = count
            rpcs_started = (
                method_stats.rpcs_started
                - before.stats_per_method[method].rpcs_started
            )
            if rpcs_started < 0:
                raise AssertionError("Diff of count shouldn't be negative")
            diff.stats_per_method[method].rpcs_started = rpcs_started
        return diff

    def assertRpcStatusCodes(
        self,
        test_client: XdsTestClient,
        *,
        expected_status: grpc.StatusCode,
        duration: _timedelta,
        method: str,
        stray_rpc_limit: int = 0,
    ) -> None:
        """Assert all RPCs for a method are completing with a certain status."""
        # pylint: disable=too-many-locals
        expected_status_int: int = expected_status.value[0]
        expected_status_fmt: str = helpers_grpc.status_pretty(expected_status)

        # Sending with pre-set QPS for a period of time
        before_stats = test_client.get_load_balancer_accumulated_stats()
        # Note: use the module logger, not the root logger, for consistency
        # with the rest of this class.
        logger.debug(
            (
                "[%s] << LoadBalancerAccumulatedStatsResponse initial"
                " measurement:\n%s"
            ),
            test_client.hostname,
            self._pretty_accumulated_stats(before_stats),
        )
        time.sleep(duration.total_seconds())
        after_stats = test_client.get_load_balancer_accumulated_stats()
        logger.debug(
            (
                "[%s] << LoadBalancerAccumulatedStatsResponse after %s seconds:"
                "\n%s"
            ),
            test_client.hostname,
            duration.total_seconds(),
            self._pretty_accumulated_stats(after_stats),
        )

        diff_stats = self.diffAccumulatedStatsPerMethod(
            before_stats, after_stats
        )
        logger.info(
            (
                "[%s] << Received accumulated stats difference."
                " Expecting RPCs with status %s for method %s:\n%s"
            ),
            test_client.hostname,
            expected_status_fmt,
            method,
            self._pretty_accumulated_stats(diff_stats, ignore_empty=True),
        )
        # Used in stack traces. Don't highlight for better compatibility.
        diff_stats_fmt: str = self._pretty_accumulated_stats(
            diff_stats, ignore_empty=True, highlight=False
        )

        # 1. Verify the completed RPCs of the given method has no statuses
        #    other than the expected_status,
        stats = diff_stats.stats_per_method[method]
        for found_status_int, count in stats.result.items():
            found_status = helpers_grpc.status_from_int(found_status_int)
            if found_status != expected_status and count > stray_rpc_limit:
                self.fail(
                    f"Expected only status {expected_status_fmt},"
                    " but found status"
                    f" {helpers_grpc.status_pretty(found_status)}"
                    f" for method {method}."
                    f"\nDiff stats:\n{diff_stats_fmt}"
                )

        # 2. Verify there are completed RPCs of the given method with
        #    the expected_status.
        self.assertGreater(
            stats.result[expected_status_int],
            0,
            msg=(
                "Expected non-zero completed RPCs with status"
                f" {expected_status_fmt} for method {method}."
                f"\nDiff stats:\n{diff_stats_fmt}"
            ),
        )

    def assertRpcsEventuallyGoToGivenServers(
        self,
        test_client: XdsTestClient,
        servers: List[XdsTestServer],
        num_rpcs: int = 100,
    ):
        """Retries until RPCs go only to the given servers, or times out."""
        retryer = retryers.constant_retryer(
            wait_fixed=datetime.timedelta(seconds=1),
            timeout=datetime.timedelta(seconds=_TD_CONFIG_MAX_WAIT_SEC),
            log_level=logging.INFO,
        )
        try:
            retryer(
                self._assertRpcsEventuallyGoToGivenServers,
                test_client,
                servers,
                num_rpcs,
            )
        except retryers.RetryError as retry_error:
            logger.exception(
                "Rpcs did not go to expected servers before timeout %s",
                _TD_CONFIG_MAX_WAIT_SEC,
            )
            raise retry_error

    def _assertRpcsEventuallyGoToGivenServers(
        self,
        test_client: XdsTestClient,
        servers: List[XdsTestServer],
        num_rpcs: int,
    ):
        """Single check: all RPCs succeed and peers equal the given servers."""
        server_hostnames = [server.hostname for server in servers]
        logger.info("Verifying RPCs go to servers %s", server_hostnames)
        lb_stats = self.getClientRpcStats(test_client, num_rpcs)
        failed = int(lb_stats.num_failures)
        self.assertLessEqual(
            failed,
            0,
            msg=f"Expected all RPCs to succeed: {failed} of {num_rpcs} failed",
        )
        for server_hostname in server_hostnames:
            self.assertIn(
                server_hostname,
                lb_stats.rpcs_by_peer,
                f"Server {server_hostname} did not receive RPCs",
            )
        for server_hostname in lb_stats.rpcs_by_peer.keys():
            self.assertIn(
                server_hostname,
                server_hostnames,
                f"Unexpected server {server_hostname} received RPCs",
            )

    def assertXdsConfigExists(self, test_client: XdsTestClient):
        """Asserts CSDS reports all four xDS config types (LDS/RDS/CDS/EDS)."""
        config = test_client.csds.fetch_client_status(log_level=logging.INFO)
        self.assertIsNotNone(config)
        seen = set()
        want = frozenset(
            [
                "listener_config",
                "cluster_config",
                "route_config",
                "endpoint_config",
            ]
        )
        for xds_config in config.xds_config:
            seen.add(xds_config.WhichOneof("per_xds_config"))
        # Newer control planes return generic_xds_configs; map the resource
        # type URLs back to the per-xds-config names.
        for generic_xds_config in config.generic_xds_configs:
            if re.search(r"\.Listener$", generic_xds_config.type_url):
                seen.add("listener_config")
            elif re.search(
                r"\.RouteConfiguration$", generic_xds_config.type_url
            ):
                seen.add("route_config")
            elif re.search(r"\.Cluster$", generic_xds_config.type_url):
                seen.add("cluster_config")
            elif re.search(
                r"\.ClusterLoadAssignment$", generic_xds_config.type_url
            ):
                seen.add("endpoint_config")
        logger.debug(
            "Received xDS config dump: %s",
            json_format.MessageToJson(config, indent=2),
        )
        self.assertSameElements(want, seen)

    def assertRouteConfigUpdateTrafficHandoff(
        self,
        test_client: XdsTestClient,
        previous_route_config_version: str,
        retry_wait_second: int,
        timeout_second: int,
    ):
        """Waits until CSDS reports a route config version different from
        previous_route_config_version, asserting successful RPCs throughout.
        """
        # Initialized before the retry loop so the except clause can log it
        # even when the first attempt fails before fetching the config.
        dumped_config = None
        retryer = retryers.constant_retryer(
            wait_fixed=datetime.timedelta(seconds=retry_wait_second),
            timeout=datetime.timedelta(seconds=timeout_second),
            retry_on_exceptions=(TdPropagationRetryableError,),
            logger=logger,
            log_level=logging.INFO,
        )
        try:
            for attempt in retryer:
                with attempt:
                    self.assertSuccessfulRpcs(test_client)
                    raw_config = test_client.csds.fetch_client_status(
                        log_level=logging.INFO
                    )
                    dumped_config = xds_url_map_testcase.DumpedXdsConfig(
                        json_format.MessageToDict(raw_config)
                    )
                    route_config_version = dumped_config.rds_version
                    if previous_route_config_version == route_config_version:
                        logger.info(
                            "Routing config not propagated yet. Retrying."
                        )
                        raise TdPropagationRetryableError(
                            "CSDS not get updated routing config corresponding"
                            " to the second set of url maps"
                        )
                    else:
                        self.assertSuccessfulRpcs(test_client)
                        logger.info(
                            (
                                "[SUCCESS] Confirmed successful RPC with the "
                                "updated routing config, version=%s"
                            ),
                            route_config_version,
                        )
        except retryers.RetryError as retry_error:
            logger.info(
                (
                    "Retry exhausted. TD routing config propagation failed"
                    " after timeout %ds. Last seen client config dump: %s"
                ),
                timeout_second,
                dumped_config,
            )
            raise retry_error

    def assertFailedRpcs(
        self, test_client: XdsTestClient, num_rpcs: Optional[int] = 100
    ):
        """Sends num_rpcs and asserts that every one of them failed."""
        lb_stats = self.getClientRpcStats(test_client, num_rpcs)
        failed = int(lb_stats.num_failures)
        self.assertEqual(
            failed,
            num_rpcs,
            msg=f"Expected all RPCs to fail: {failed} of {num_rpcs} failed",
        )

    @classmethod
    def getClientRpcStats(
        cls, test_client: XdsTestClient, num_rpcs: int
    ) -> _LoadBalancerStatsResponse:
        """Sends num_rpcs via the test client, logs and returns LB stats."""
        lb_stats = test_client.get_load_balancer_stats(num_rpcs=num_rpcs)
        logger.info(
            "[%s] << Received LoadBalancerStatsResponse:\n%s",
            test_client.hostname,
            cls._pretty_lb_stats(lb_stats),
        )
        return lb_stats

    def assertAllBackendsReceivedRpcs(self, lb_stats):
        """Asserts every peer in lb_stats received at least one RPC."""
        # TODO(sergiitk): assert backends length
        for backend, rpcs_count in lb_stats.rpcs_by_peer.items():
            self.assertGreater(
                int(rpcs_count),
                0,
                msg=f"Backend {backend} did not receive a single RPC",
            )
class IsolatedXdsKubernetesTestCase(
    XdsKubernetesBaseTestCase, metaclass=abc.ABCMeta
):
    """Isolated test case.

    Base class for tests cases where infra resources are created before
    each test, and destroyed after.
    """

    def setUp(self):
        """Hook method for setting up the test fixture before exercising it."""
        super().setUp()
        if self.resource_suffix_randomize:
            self.resource_suffix = helpers_rand.random_resource_suffix()
        logger.info(
            "Test run resource prefix: %s, suffix: %s",
            self.resource_prefix,
            self.resource_suffix,
        )

        # TD Manager
        self.td = self.initTrafficDirectorManager()

        # Test Server runner
        self.server_namespace = KubernetesServerRunner.make_namespace_name(
            self.resource_prefix, self.resource_suffix
        )
        self.server_runner = self.initKubernetesServerRunner()

        # Test Client runner
        self.client_namespace = KubernetesClientRunner.make_namespace_name(
            self.resource_prefix, self.resource_suffix
        )
        self.client_runner = self.initKubernetesClientRunner()

        # Ensures the firewall exist
        if self.ensure_firewall:
            self.td.create_firewall_rule(
                allowed_ports=self.firewall_allowed_ports
            )

        # Randomize xds port, when it's set to 0
        if self.server_xds_port == 0:
            # TODO(sergiitk): this is prone to race conditions:
            # The port might not be taken now, but there's no guarantee
            # it won't be taken until the tests get to creating
            # forwarding rule. This check is better than nothing,
            # but we should find a better approach.
            self.server_xds_port = self.td.find_unused_forwarding_rule_port()
            logger.info("Found unused xds port: %s", self.server_xds_port)

    @abc.abstractmethod
    def initTrafficDirectorManager(self) -> TrafficDirectorManager:
        """Returns a TD manager of the flavor this test case needs."""
        raise NotImplementedError

    @abc.abstractmethod
    def initKubernetesServerRunner(self) -> KubernetesServerRunner:
        """Returns a test server runner of the flavor this test case needs."""
        raise NotImplementedError

    @abc.abstractmethod
    def initKubernetesClientRunner(self) -> KubernetesClientRunner:
        """Returns a test client runner of the flavor this test case needs."""
        raise NotImplementedError

    def tearDown(self):
        """Cleans up test resources; fails on unexpected pod restarts."""
        logger.info("----- TestMethod %s teardown -----", self.id())
        logger.debug("Getting pods restart times")
        client_restarts: int = 0
        server_restarts: int = 0
        try:
            client_restarts = self.client_runner.get_pod_restarts(
                self.client_runner.deployment
            )
            server_restarts = self.server_runner.get_pod_restarts(
                self.server_runner.deployment
            )
        except (retryers.RetryError, k8s.NotFound) as e:
            # Restart counts are best-effort; don't fail teardown on lookup
            # errors, just log them.
            logger.exception(e)

        # Retry the cleanup to work around transient infra errors.
        retryer = retryers.constant_retryer(
            wait_fixed=_timedelta(seconds=10),
            attempts=3,
            log_level=logging.INFO,
        )
        try:
            retryer(self.cleanup)
        except retryers.RetryError:
            logger.exception("Got error during teardown")
        finally:
            logger.info("----- Test client/server logs -----")
            self.client_runner.logs_explorer_run_history_links()
            self.server_runner.logs_explorer_run_history_links()

            # Fail if any of the pods restarted.
            self.assertEqual(
                client_restarts,
                0,
                msg=(
                    "Client pods unexpectedly restarted"
                    f" {client_restarts} times during test. In most cases, this"
                    " is caused by the test client app crash."
                ),
            )
            self.assertEqual(
                server_restarts,
                0,
                msg=(
                    "Server pods unexpectedly restarted"
                    f" {server_restarts} times during test. In most cases, this"
                    " is caused by the test server app crash."
                ),
            )

    def cleanup(self):
        """Deletes TD and k8s resources; forced when force_cleanup is set."""
        self.td.cleanup(force=self.force_cleanup)
        self.client_runner.cleanup(force=self.force_cleanup)
        self.server_runner.cleanup(
            force=self.force_cleanup, force_namespace=self.force_cleanup
        )
class RegularXdsKubernetesTestCase(IsolatedXdsKubernetesTestCase):
    """Regular test case base class for testing PSM features in isolation."""

    @classmethod
    def setUpClass(cls):
        """Hook method for setting up class fixture before running tests in
        the class.
        """
        super().setUpClass()
        if cls.server_maintenance_port is None:
            # Default the health/channelz port when not set via flag.
            cls.server_maintenance_port = (
                KubernetesServerRunner.DEFAULT_MAINTENANCE_PORT
            )

    def initTrafficDirectorManager(self) -> TrafficDirectorManager:
        """Creates the plain (non-secure, non-AppNet) TD manager."""
        return TrafficDirectorManager(
            self.gcp_api_manager,
            project=self.project,
            resource_prefix=self.resource_prefix,
            resource_suffix=self.resource_suffix,
            network=self.network,
            compute_api_version=self.compute_api_version,
        )

    def initKubernetesServerRunner(self) -> KubernetesServerRunner:
        """Creates the test server runner from the class-level settings."""
        return KubernetesServerRunner(
            k8s.KubernetesNamespace(
                self.k8s_api_manager, self.server_namespace
            ),
            deployment_name=self.server_name,
            image_name=self.server_image,
            td_bootstrap_image=self.td_bootstrap_image,
            gcp_project=self.project,
            gcp_api_manager=self.gcp_api_manager,
            gcp_service_account=self.gcp_service_account,
            xds_server_uri=self.xds_server_uri,
            network=self.network,
            debug_use_port_forwarding=self.debug_use_port_forwarding,
            enable_workload_identity=self.enable_workload_identity,
        )

    def initKubernetesClientRunner(self) -> KubernetesClientRunner:
        """Creates the test client runner from the class-level settings."""
        return KubernetesClientRunner(
            k8s.KubernetesNamespace(
                self.k8s_api_manager, self.client_namespace
            ),
            deployment_name=self.client_name,
            image_name=self.client_image,
            td_bootstrap_image=self.td_bootstrap_image,
            gcp_project=self.project,
            gcp_api_manager=self.gcp_api_manager,
            gcp_service_account=self.gcp_service_account,
            xds_server_uri=self.xds_server_uri,
            network=self.network,
            debug_use_port_forwarding=self.debug_use_port_forwarding,
            enable_workload_identity=self.enable_workload_identity,
            stats_port=self.client_port,
            # Share the namespace when client and server are co-located.
            reuse_namespace=self.server_namespace == self.client_namespace,
        )

    def startTestServers(
        self, replica_count=1, server_runner=None, **kwargs
    ) -> List[XdsTestServer]:
        """Deploys the test servers and points them at the xDS address."""
        if server_runner is None:
            server_runner = self.server_runner
        test_servers = server_runner.run(
            replica_count=replica_count,
            test_port=self.server_port,
            maintenance_port=self.server_maintenance_port,
            **kwargs,
        )
        for test_server in test_servers:
            test_server.set_xds_address(
                self.server_xds_host, self.server_xds_port
            )
        return test_servers

    def startTestClient(
        self, test_server: XdsTestServer, **kwargs
    ) -> XdsTestClient:
        """Deploys the test client targeting the given server's xDS URI."""
        test_client = self.client_runner.run(
            server_target=test_server.xds_uri, **kwargs
        )
        test_client.wait_for_active_server_channel()
        return test_client
class AppNetXdsKubernetesTestCase(RegularXdsKubernetesTestCase):
    """Regular test case, but backed by the AppNet TD manager."""

    td: TrafficDirectorAppNetManager

    def initTrafficDirectorManager(self) -> TrafficDirectorAppNetManager:
        """Creates the AppNet flavor of the TD manager."""
        return TrafficDirectorAppNetManager(
            self.gcp_api_manager,
            project=self.project,
            resource_prefix=self.resource_prefix,
            resource_suffix=self.resource_suffix,
            network=self.network,
            compute_api_version=self.compute_api_version,
        )
class SecurityXdsKubernetesTestCase(IsolatedXdsKubernetesTestCase):
    """Test case base class for testing PSM security features in isolation."""

    td: TrafficDirectorSecureManager

    class SecurityMode(enum.Enum):
        MTLS = enum.auto()
        TLS = enum.auto()
        PLAINTEXT = enum.auto()

    @classmethod
    def setUpClass(cls):
        """Hook method for setting up class fixture before running tests in
        the class.
        """
        super().setUpClass()
        if cls.server_maintenance_port is None:
            # In secure mode, the maintenance port is different from
            # the test port to keep it insecure, and make
            # Health Checks and Channelz tests available.
            # When not provided, use explicit numeric port value, so
            # Backend Health Checks are created on a fixed port.
            cls.server_maintenance_port = (
                KubernetesServerRunner.DEFAULT_SECURE_MODE_MAINTENANCE_PORT
            )

    def initTrafficDirectorManager(self) -> TrafficDirectorSecureManager:
        """Creates the secure flavor of the TD manager."""
        return TrafficDirectorSecureManager(
            self.gcp_api_manager,
            project=self.project,
            resource_prefix=self.resource_prefix,
            resource_suffix=self.resource_suffix,
            network=self.network,
            compute_api_version=self.compute_api_version,
        )

    def initKubernetesServerRunner(self) -> KubernetesServerRunner:
        """Creates the server runner using the secure deployment template."""
        return KubernetesServerRunner(
            k8s.KubernetesNamespace(
                self.k8s_api_manager, self.server_namespace
            ),
            deployment_name=self.server_name,
            image_name=self.server_image,
            td_bootstrap_image=self.td_bootstrap_image,
            gcp_project=self.project,
            gcp_api_manager=self.gcp_api_manager,
            gcp_service_account=self.gcp_service_account,
            network=self.network,
            xds_server_uri=self.xds_server_uri,
            deployment_template="server-secure.deployment.yaml",
            debug_use_port_forwarding=self.debug_use_port_forwarding,
        )

    def initKubernetesClientRunner(self) -> KubernetesClientRunner:
        """Creates the client runner using the secure deployment template."""
        return KubernetesClientRunner(
            k8s.KubernetesNamespace(
                self.k8s_api_manager, self.client_namespace
            ),
            deployment_name=self.client_name,
            image_name=self.client_image,
            td_bootstrap_image=self.td_bootstrap_image,
            gcp_project=self.project,
            gcp_api_manager=self.gcp_api_manager,
            gcp_service_account=self.gcp_service_account,
            xds_server_uri=self.xds_server_uri,
            network=self.network,
            deployment_template="client-secure.deployment.yaml",
            stats_port=self.client_port,
            # Share the namespace when client and server are co-located.
            reuse_namespace=self.server_namespace == self.client_namespace,
            debug_use_port_forwarding=self.debug_use_port_forwarding,
        )

    def startSecureTestServer(self, replica_count=1, **kwargs) -> XdsTestServer:
        """Deploys a secure-mode test server and sets its xDS address."""
        test_server = self.server_runner.run(
            replica_count=replica_count,
            test_port=self.server_port,
            maintenance_port=self.server_maintenance_port,
            secure_mode=True,
            **kwargs,
        )[0]
        test_server.set_xds_address(self.server_xds_host, self.server_xds_port)
        return test_server

    def setupSecurityPolicies(
        self, *, server_tls, server_mtls, client_tls, client_mtls
    ):
        """Configures TD client and server security policies."""
        self.td.setup_client_security(
            server_namespace=self.server_namespace,
            server_name=self.server_name,
            tls=client_tls,
            mtls=client_mtls,
        )
        self.td.setup_server_security(
            server_namespace=self.server_namespace,
            server_name=self.server_name,
            server_port=self.server_port,
            tls=server_tls,
            mtls=server_mtls,
        )

    def startSecureTestClient(
        self,
        test_server: XdsTestServer,
        *,
        wait_for_active_server_channel=True,
        **kwargs,
    ) -> XdsTestClient:
        """Deploys a secure-mode test client targeting the given server."""
        test_client = self.client_runner.run(
            server_target=test_server.xds_uri, secure_mode=True, **kwargs
        )
        if wait_for_active_server_channel:
            test_client.wait_for_active_server_channel()
        return test_client

    def assertTestAppSecurity(
        self,
        mode: SecurityMode,
        test_client: XdsTestClient,
        test_server: XdsTestServer,
    ):
        """Asserts the client/server connection matches the security mode."""
        client_socket, server_socket = self.getConnectedSockets(
            test_client, test_server
        )
        server_security: grpc_channelz.Security = server_socket.security
        client_security: grpc_channelz.Security = client_socket.security
        logger.info("Server certs: %s", self.debug_sock_certs(server_security))
        logger.info("Client certs: %s", self.debug_sock_certs(client_security))

        if mode is self.SecurityMode.MTLS:
            self.assertSecurityMtls(client_security, server_security)
        elif mode is self.SecurityMode.TLS:
            self.assertSecurityTls(client_security, server_security)
        elif mode is self.SecurityMode.PLAINTEXT:
            self.assertSecurityPlaintext(client_security, server_security)
        else:
            raise TypeError("Incorrect security mode")

    def assertSecurityMtls(
        self,
        client_security: grpc_channelz.Security,
        server_security: grpc_channelz.Security,
    ):
        """Asserts both peers use TLS and both presented certificates."""
        self.assertEqual(
            client_security.WhichOneof("model"),
            "tls",
            msg="(mTLS) Client socket security model must be TLS",
        )
        self.assertEqual(
            server_security.WhichOneof("model"),
            "tls",
            msg="(mTLS) Server socket security model must be TLS",
        )
        server_tls, client_tls = server_security.tls, client_security.tls

        # Confirm regular TLS: server local cert == client remote cert
        self.assertNotEmpty(
            client_tls.remote_certificate,
            msg="(mTLS) Client remote certificate is missing",
        )
        if self.check_local_certs:
            self.assertNotEmpty(
                server_tls.local_certificate,
                msg="(mTLS) Server local certificate is missing",
            )
            self.assertEqual(
                server_tls.local_certificate,
                client_tls.remote_certificate,
                msg=(
                    "(mTLS) Server local certificate must match client's "
                    "remote certificate"
                ),
            )

        # mTLS: server remote cert == client local cert
        self.assertNotEmpty(
            server_tls.remote_certificate,
            msg="(mTLS) Server remote certificate is missing",
        )
        if self.check_local_certs:
            self.assertNotEmpty(
                client_tls.local_certificate,
                msg="(mTLS) Client local certificate is missing",
            )
            self.assertEqual(
                server_tls.remote_certificate,
                client_tls.local_certificate,
                msg=(
                    "(mTLS) Server remote certificate must match client's "
                    "local certificate"
                ),
            )

    def assertSecurityTls(
        self,
        client_security: grpc_channelz.Security,
        server_security: grpc_channelz.Security,
    ):
        """Asserts server-only TLS: server presented a cert, client did not."""
        self.assertEqual(
            client_security.WhichOneof("model"),
            "tls",
            msg="(TLS) Client socket security model must be TLS",
        )
        self.assertEqual(
            server_security.WhichOneof("model"),
            "tls",
            msg="(TLS) Server socket security model must be TLS",
        )
        server_tls, client_tls = server_security.tls, client_security.tls

        # Regular TLS: server local cert == client remote cert
        self.assertNotEmpty(
            client_tls.remote_certificate,
            msg="(TLS) Client remote certificate is missing",
        )
        if self.check_local_certs:
            self.assertNotEmpty(
                server_tls.local_certificate,
                msg="(TLS) Server local certificate is missing",
            )
            self.assertEqual(
                server_tls.local_certificate,
                client_tls.remote_certificate,
                msg=(
                    "(TLS) Server local certificate must match client "
                    "remote certificate"
                ),
            )

        # mTLS must not be used
        self.assertEmpty(
            server_tls.remote_certificate,
            msg=(
                "(TLS) Server remote certificate must be empty in TLS mode. "
                "Is server security incorrectly configured for mTLS?"
            ),
        )
        self.assertEmpty(
            client_tls.local_certificate,
            msg=(
                "(TLS) Client local certificate must be empty in TLS mode. "
                "Is client security incorrectly configured for mTLS?"
            ),
        )

    def assertSecurityPlaintext(self, client_security, server_security):
        """Asserts no certificates on either side of the connection."""
        server_tls, client_tls = server_security.tls, client_security.tls
        # Not TLS
        self.assertEmpty(
            server_tls.local_certificate,
            msg="(Plaintext) Server local certificate must be empty.",
        )
        self.assertEmpty(
            client_tls.local_certificate,
            msg="(Plaintext) Client local certificate must be empty.",
        )

        # Not mTLS
        self.assertEmpty(
            server_tls.remote_certificate,
            msg="(Plaintext) Server remote certificate must be empty.",
        )
        # Fixed: previously this re-checked client_tls.local_certificate,
        # leaving the client remote certificate unverified.
        self.assertEmpty(
            client_tls.remote_certificate,
            msg="(Plaintext) Client remote certificate must be empty.",
        )

    def assertClientCannotReachServerRepeatedly(
        self,
        test_client: XdsTestClient,
        *,
        times: Optional[int] = None,
        delay: Optional[_timedelta] = None,
    ):
        """
        Asserts that the client repeatedly cannot reach the server.

        With negative tests we can't be absolutely certain expected failure
        state is not caused by something else.
        To mitigate for this, we repeat the checks several times, and expect
        all of them to succeed.

        This is useful in case the channel eventually stabilizes, and RPCs pass.

        Args:
            test_client: An instance of XdsTestClient
            times: Optional; A positive number of times to confirm that
                the server is unreachable. Defaults to `3` attempts.
            delay: Optional; Specifies how long to wait before the next check.
                Defaults to `10` seconds.
        """
        if times is None or times < 1:
            times = 3
        if delay is None:
            delay = _timedelta(seconds=10)

        for i in range(1, times + 1):
            self.assertClientCannotReachServer(test_client)
            if i < times:
                logger.info(
                    "Check %s passed, waiting %s before the next check",
                    i,
                    delay,
                )
                time.sleep(delay.total_seconds())

    def assertClientCannotReachServer(self, test_client: XdsTestClient):
        """Asserts the channel failed and all RPCs fail."""
        self.assertClientChannelFailed(test_client)
        self.assertFailedRpcs(test_client)

    def assertClientChannelFailed(self, test_client: XdsTestClient):
        """Asserts the server channel is in TRANSIENT_FAILURE with exactly
        one subchannel."""
        channel = test_client.wait_for_server_channel_state(
            state=_ChannelState.TRANSIENT_FAILURE
        )
        subchannels = list(
            test_client.channelz.list_channel_subchannels(channel)
        )
        self.assertLen(
            subchannels,
            1,
            msg=(
                "Client channel must have exactly one subchannel "
                "in state TRANSIENT_FAILURE."
            ),
        )

    @staticmethod
    def getConnectedSockets(
        test_client: XdsTestClient, test_server: XdsTestServer
    ) -> Tuple[grpc_channelz.Socket, grpc_channelz.Socket]:
        """Returns the (client, server) channelz sockets of one connection."""
        client_sock = test_client.get_active_server_channel_socket()
        server_sock = test_server.get_server_socket_matching_client(client_sock)
        return client_sock, server_sock

    @classmethod
    def debug_sock_certs(cls, security: grpc_channelz.Security):
        """Returns a short human-readable summary of a socket's certs."""
        if security.WhichOneof("model") == "other":
            return f"other: <{security.other.name}={security.other.value}>"

        return (
            f"local: <{cls.debug_cert(security.tls.local_certificate)}>, "
            f"remote: <{cls.debug_cert(security.tls.remote_certificate)}>"
        )

    @staticmethod
    def debug_cert(cert):
        """Returns a sha1 fingerprint and length of a cert, or "missing"."""
        if not cert:
            return "missing"
        sha1 = hashlib.sha1(cert)
        return f"sha1={sha1.hexdigest()}, len={len(cert)}"
| 42,800
| 36.348168
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/helpers/datetime.py
|
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This contains common helpers for working with dates and time."""
import datetime
import re
from typing import Optional, Pattern
# Matches a trailing zero UTC offset: "+00:00", "-00:00", "+0000", "-0000".
RE_ZERO_OFFSET: Pattern[str] = re.compile(r"[+\-]00:?00$")
def utc_now() -> datetime.datetime:
    """Construct a datetime from current time in UTC timezone."""
    return datetime.datetime.now(datetime.timezone.utc)
def shorten_utc_zone(utc_datetime_str: str) -> str:
    """Replace ±00:00 timezone designator with Z (zero offset AKA Zulu time)."""
    return RE_ZERO_OFFSET.sub("Z", utc_datetime_str)
def iso8601_utc_time(time: Optional[datetime.datetime] = None) -> str:
    """Converts datetime to UTC and formats as ISO-8601 Zulu time.

    Args:
        time: The datetime to format; defaults to the current UTC time.
            (Previously the None default crashed with AttributeError.)
    """
    if time is None:
        time = utc_now()
    utc_time = time.astimezone(tz=datetime.timezone.utc)
    return shorten_utc_zone(utc_time.isoformat())
def datetime_suffix(*, seconds: bool = False) -> str:
    """Return current UTC date, and time in a format useful for resource naming.

    Examples:
        - 20210626-1859 (seconds=False)
        - 20210626-185942 (seconds=True)
    Use in resources names incompatible with ISO 8601, e.g. some GCP resources
    that only allow lowercase alphanumeric chars and dashes.

    Hours and minutes are joined together for better readability, so time is
    visually distinct from dash-separated date.
    """
    return utc_now().strftime("%Y%m%d-%H%M" + ("%S" if seconds else ""))
| 1,925
| 36.764706
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/helpers/skips.py
|
# Copyright 2022 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The classes and predicates to assist validate test config for test cases."""
from dataclasses import dataclass
import enum
import logging
import re
from typing import Callable, Optional
import unittest
from packaging import version as pkg_version
from framework import xds_flags
from framework import xds_k8s_flags
logger = logging.getLogger(__name__)
class Lang(enum.Flag):
    """Supported test app languages, combinable as a flag set."""

    UNKNOWN = enum.auto()
    CPP = enum.auto()
    GO = enum.auto()
    JAVA = enum.auto()
    PYTHON = enum.auto()
    NODE = enum.auto()

    def __str__(self):
        # Lowercase member name, e.g. Lang.CPP -> "cpp".
        return f"{self.name}".lower()

    @classmethod
    def from_string(cls, lang: str):
        """Case-insensitive name lookup; unrecognized names map to UNKNOWN."""
        member_name = lang.upper()
        if member_name in cls.__members__:
            return cls.__members__[member_name]
        return cls.UNKNOWN
@dataclass
class TestConfig:
    """Describes the config for the test suite.

    TODO(sergiitk): rename to LangSpec and rename skips.py to lang.py.
    """

    client_lang: Lang
    server_lang: Lang
    version: Optional[str]

    def version_gte(self, another: str) -> bool:
        """Returns a bool for whether this VERSION is >= then ANOTHER version.

        Special cases:
        1) Versions "master" or "dev" are always greater than ANOTHER:
           - master > v1.999.x > v1.55.x
           - dev > v1.999.x > v1.55.x
           - dev == master
        2) Versions "dev-VERSION" behave the same as the VERSION:
           - dev-master > v1.999.x > v1.55.x
           - dev-master == dev == master
           - v1.55.x > dev-v1.54.x > v1.53.x
           - dev-v1.54.x == v1.54.x
        3) Unspecified version (self.version is None) is treated as "master".
        """
        # Cases 1 and 3: our version is the newest possible.
        if self.version in ("master", "dev", "dev-master", None):
            return True
        # A concrete release can never be >= master.
        if another == "master":
            return False
        ours = self._parse_version(self.version)
        theirs = self._parse_version(another)
        return ours >= theirs

    def __str__(self):
        return (
            f"TestConfig(client_lang='{self.client_lang}', "
            f"server_lang='{self.server_lang}', version={self.version!r})"
        )

    @staticmethod
    def _parse_version(version: str) -> pkg_version.Version:
        """Normalize "dev-" prefix and ".x" suffix, then parse the version."""
        if version.startswith("dev-"):
            # Treat "dev-VERSION" as "VERSION".
            version = version[len("dev-") :]
        if version.endswith(".x"):
            version = version[: -len(".x")]
        return pkg_version.Version(version)
def _get_lang(image_name: str) -> Lang:
    """Extract the language from a test image name, f.e. ".../cpp-client:tag"."""
    match = re.search(r"/(\w+)-(client|server):", image_name)
    # An unmatchable image name raises AttributeError (match is None).
    return Lang.from_string(match.group(1))
def evaluate_test_config(check: Callable[[TestConfig], bool]) -> TestConfig:
    """Evaluates the test config check against Abseil flags.

    Raises unittest.SkipTest when the check rejects the parsed config.

    TODO(sergiitk): split into parse_lang_spec and check_is_supported.
    """
    # NOTE(lidiz) a manual skip mechanism is needed because absl/flags
    # cannot be used in the built-in test-skipping decorators. See the
    # official FAQs:
    # https://abseil.io/docs/python/guides/flags#faqs
    client_lang = _get_lang(xds_k8s_flags.CLIENT_IMAGE.value)
    server_lang = _get_lang(xds_k8s_flags.SERVER_IMAGE.value)
    test_config = TestConfig(
        client_lang=client_lang,
        server_lang=server_lang,
        version=xds_flags.TESTING_VERSION.value,
    )
    if not check(test_config):
        logger.info("Skipping %s", test_config)
        raise unittest.SkipTest(f"Unsupported test config: {test_config}")
    logger.info("Detected language and version: %s", test_config)
    return test_config
| 3,962
| 30.452381
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/helpers/retryers.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This contains common retrying helpers (retryers).
We use tenacity as a general-purpose retrying library.
> It [tenacity] originates from a fork of retrying which is sadly no
> longer maintained. Tenacity isn’t api compatible with retrying but >
> adds significant new functionality and fixes a number of longstanding bugs.
> - https://tenacity.readthedocs.io/en/latest/index.html
"""
import datetime
import logging
from typing import Any, Callable, List, Optional, Tuple, Type
import tenacity
from tenacity import _utils as tenacity_utils
from tenacity import compat as tenacity_compat
from tenacity import stop
from tenacity import wait
from tenacity.retry import retry_base
retryers_logger = logging.getLogger(__name__)
# Type aliases
timedelta = datetime.timedelta
Retrying = tenacity.Retrying
# Callback inspecting a call's result; returns True when the result is OK.
CheckResultFn = Callable[[Any], bool]
# Tuple of exception classes a retryer catches and retries on.
_ExceptionClasses = Tuple[Type[Exception], ...]
def _build_retry_conditions(
    *,
    retry_on_exceptions: Optional[_ExceptionClasses] = None,
    check_result: Optional[CheckResultFn] = None,
) -> List[retry_base]:
    """Assemble the tenacity retry conditions.

    Retries on the given exception classes (any Exception when unspecified),
    and optionally when check_result rejects the returned value.
    """
    # Retry on all exceptions by default.
    exceptions = (
        (Exception,) if retry_on_exceptions is None else retry_on_exceptions
    )
    conditions: List[retry_base] = [
        tenacity.retry_if_exception_type(exceptions)
    ]
    if check_result is None:
        return conditions
    if exceptions:
        # When retryable exceptions are configured, also catch them while
        # executing the check_result callback itself.
        check_result = _safe_check_result(check_result, exceptions)
    conditions.append(tenacity.retry_if_not_result(check_result))
    return conditions
def exponential_retryer_with_timeout(
    *,
    wait_min: timedelta,
    wait_max: timedelta,
    timeout: timedelta,
    retry_on_exceptions: Optional[_ExceptionClasses] = None,
    check_result: Optional[CheckResultFn] = None,
    logger: Optional[logging.Logger] = None,
    log_level: Optional[int] = logging.DEBUG,
) -> Retrying:
    """Build a retryer with exponential backoff bounded by a total timeout."""
    effective_logger = retryers_logger if logger is None else logger
    effective_log_level = logging.DEBUG if log_level is None else log_level
    conditions = _build_retry_conditions(
        retry_on_exceptions=retry_on_exceptions, check_result=check_result
    )
    # Propagate the configured timeout into the final RetryError message.
    on_error = _on_error_callback(timeout=timeout, check_result=check_result)
    backoff = wait.wait_exponential(
        min=wait_min.total_seconds(), max=wait_max.total_seconds()
    )
    return Retrying(
        retry=tenacity.retry_any(*conditions),
        wait=backoff,
        stop=stop.stop_after_delay(timeout.total_seconds()),
        before_sleep=_before_sleep_log(effective_logger, effective_log_level),
        retry_error_callback=on_error,
    )
def constant_retryer(
    *,
    wait_fixed: timedelta,
    attempts: int = 0,
    timeout: Optional[timedelta] = None,
    retry_on_exceptions: Optional[_ExceptionClasses] = None,
    check_result: Optional[CheckResultFn] = None,
    logger: Optional[logging.Logger] = None,
    log_level: Optional[int] = logging.DEBUG,
) -> Retrying:
    """Build a retryer with a fixed wait, stopped by attempts and/or timeout.

    Raises:
        ValueError: When neither attempts nor timeout is provided.
    """
    effective_logger = retryers_logger if logger is None else logger
    effective_log_level = logging.DEBUG if log_level is None else log_level
    if attempts < 1 and timeout is None:
        raise ValueError("The number of attempts or the timeout must be set")
    # Stop on whichever configured limit is reached first.
    stop_conditions = []
    if attempts > 0:
        stop_conditions.append(stop.stop_after_attempt(attempts))
    if timeout is not None:
        stop_conditions.append(stop.stop_after_delay(timeout.total_seconds()))
    conditions = _build_retry_conditions(
        retry_on_exceptions=retry_on_exceptions, check_result=check_result
    )
    # Propagate the configured limits into the final RetryError message.
    on_error = _on_error_callback(
        timeout=timeout, attempts=attempts, check_result=check_result
    )
    return Retrying(
        retry=tenacity.retry_any(*conditions),
        wait=wait.wait_fixed(wait_fixed.total_seconds()),
        stop=stop.stop_any(*stop_conditions),
        before_sleep=_before_sleep_log(effective_logger, effective_log_level),
        retry_error_callback=on_error,
    )
def _on_error_callback(
    *,
    timeout: Optional[timedelta] = None,
    attempts: int = 0,
    check_result: Optional[CheckResultFn] = None,
):
    """Build a retry_error_callback raising RetryError with the retryer's
    initial configuration, so the error message can mention the timeout
    and/or the number of attempts that were exhausted.
    """

    def _raise_retry_error(retry_state: tenacity.RetryCallState):
        raise RetryError(
            retry_state,
            timeout=timeout,
            attempts=attempts,
            check_result=check_result,
        )

    return _raise_retry_error
def _safe_check_result(
    check_result: CheckResultFn, retry_on_exceptions: _ExceptionClasses
) -> CheckResultFn:
    """Wraps check_result callback to catch and handle retry_on_exceptions.

    Normally tenacity doesn't retry when retry_if_result/retry_if_not_result
    raise an error. This wraps the callback to automatically catch Exceptions
    specified in the retry_on_exceptions argument.

    Ideally we should make all check_result callbacks to not throw, but
    in case it does, we'd rather be annoying in the logs, than break the test.
    """

    def _check_result_wrapped(result):
        try:
            return check_result(result)
        except retry_on_exceptions:
            # Treat a crashed check as a failed result check so the retryer
            # keeps going, but complain loudly in the logs.
            # Fixed message defects: the concatenated literals were missing
            # a space ("exception.This"), and "return" was duplicated.
            retryers_logger.warning(
                (
                    "Result check callback %s raised an exception. "
                    "This shouldn't happen, please handle any exceptions and "
                    "return a boolean."
                ),
                tenacity_utils.get_callback_name(check_result),
                exc_info=True,
            )
            return False

    return _check_result_wrapped
def _before_sleep_log(logger, log_level, exc_info=False):
    """Same as tenacity.before_sleep_log, but only logs primitive return values.

    This is not useful when the return value is a dump of a large object.
    """

    def log_it(retry_state):
        # retry_state.outcome is a Future-like object holding either the
        # exception raised by, or the value returned from, the last attempt.
        if retry_state.outcome.failed:
            ex = retry_state.outcome.exception()
            verb, value = "raised", "%s: %s" % (type(ex).__name__, ex)
            if exc_info:
                local_exc_info = tenacity_compat.get_exc_info_from_future(
                    retry_state.outcome
                )
            else:
                local_exc_info = False
        else:
            local_exc_info = False  # exc_info does not apply when no exception
            result = retry_state.outcome.result()
            # Log the value itself only for primitives; otherwise just its
            # type, to avoid dumping a large object into the logs.
            if isinstance(result, (int, bool, str)):
                verb, value = "returned", result
            else:
                verb, value = "returned type", type(result)
        logger.log(
            log_level,
            "Retrying %s in %s seconds as it %s %s.",
            tenacity_utils.get_callback_name(retry_state.fn),
            getattr(retry_state.next_action, "sleep"),
            verb,
            value,
            exc_info=local_exc_info,
        )

    return log_it
class RetryError(tenacity.RetryError):
    """Raised when a retryer gives up; carries a human-readable message
    describing the configured limits and the outcome of the last attempt.
    """

    def __init__(
        self,
        retry_state,
        *,
        timeout: Optional[timedelta] = None,
        attempts: int = 0,
        check_result: Optional[CheckResultFn] = None,
    ):
        super().__init__(retry_state.outcome)
        callback_name = tenacity_utils.get_callback_name(retry_state.fn)
        # Describe which configured limit(s) could have stopped the retryer.
        reasons = []
        if timeout:
            reasons.append(f"timeout {timeout} (h:mm:ss) exceeded")
        if attempts:
            reasons.append(f"{attempts} attempts exhausted")
        message = f"Retry error calling {callback_name}:"
        if reasons:
            message += " " + " or ".join(reasons)
        message += "."
        # Describe the last attempt's outcome.
        if retry_state.outcome.failed:
            ex = retry_state.outcome.exception()
            message += f" Last exception: {type(ex).__name__}: {ex}"
        elif check_result:
            message += " Check result callback returned False."
        self.message = message

    def result(self, *, default=None):
        """Return the last attempt's result, or default when it raised."""
        if self.last_attempt.failed:
            return default
        return self.last_attempt.result()

    def __str__(self):
        return self.message
| 8,657
| 33.221344
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/helpers/rand.py
|
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This contains common helpers for generating randomized data."""
import random
import string
import framework.helpers.datetime
# Alphanumeric characters, similar to regex [:alnum:] class, [a-zA-Z0-9]
ALPHANUM = string.ascii_letters + string.digits
# Lowercase alphanumeric characters: [a-z0-9]
# Use ALPHANUM_LOWERCASE alphabet when case-sensitivity is a concern.
ALPHANUM_LOWERCASE = string.ascii_lowercase + string.digits


def rand_string(length: int = 8, *, lowercase: bool = False) -> str:
    """Return random alphanumeric string of given length.

    Space for default arguments: alphabet^length
    lowercase and uppercase = (26*2 + 10)^8 = 2.18e14 = 218 trillion.
    lowercase only = (26 + 10)^8 = 2.8e12 = 2.8 trillion.
    """
    chars = ALPHANUM_LOWERCASE if lowercase else ALPHANUM
    picks = random.choices(population=chars, k=length)
    return "".join(picks)
def random_resource_suffix() -> str:
    """Return a ready-to-use resource suffix with datetime and nonce."""
    # Date and time part for debugging, f.e. 20210626-1859.
    # Seconds are skipped as not relevant at this precision.
    date_part: str = framework.helpers.datetime.datetime_suffix()
    # Random nonce, lowercase because some resource names won't allow
    # uppercase. For len 5, total (26 + 10)^5 = 60,466,176 combinations.
    # Approx. number of test runs needed to start at the same minute to
    # produce a collision: math.sqrt(math.pi/2 * (26+10)**5) ≈ 9745.
    # https://en.wikipedia.org/wiki/Birthday_attack#Mathematics
    nonce: str = rand_string(5, lowercase=True)
    return f"{date_part}-{nonce}"
| 2,192
| 42.86
| 76
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/helpers/grpc.py
|
# Copyright 2023 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This contains common helpers for working with grpc data structures."""
import dataclasses
import functools
from typing import Dict, List, Optional
import grpc
import yaml
from framework.rpc import grpc_testing
# Type aliases
# A map from a remote peer's hostname to the count of RPCs it handled.
# Must be an assignment (was a bare annotation `RpcsByPeer: Dict[str, int]`,
# which never binds the name at runtime even though the string annotations
# below reference it; typing.get_type_hints would fail to resolve it).
RpcsByPeer = Dict[str, int]
@functools.cache  # pylint: disable=no-member
def status_from_int(grpc_status_int: int) -> Optional[grpc.StatusCode]:
    """Converts the integer gRPC status code to the grpc.StatusCode enum."""
    # StatusCode values are (int, str) tuples; match on the int part.
    matches = (
        status
        for status in grpc.StatusCode
        if status.value[0] == grpc_status_int
    )
    return next(matches, None)
def status_eq(grpc_status_int: int, grpc_status: grpc.StatusCode) -> bool:
    """Compares the integer gRPC status code with the grpc.StatusCode enum."""
    resolved = status_from_int(grpc_status_int)
    return resolved is grpc_status
def status_pretty(grpc_status: grpc.StatusCode) -> str:
    """Formats the status code as (int, NAME), f.e. (4, DEADLINE_EXCEEDED)"""
    code_int = grpc_status.value[0]
    return f"({code_int}, {grpc_status.name})"
@dataclasses.dataclass(frozen=True)
class PrettyStatsPerMethod:
    """Per-method accumulated RPC stats in a log-friendly form."""

    # The name of the method.
    method: str

    # The number of RPCs started for this method, completed and in-flight.
    rpcs_started: int

    # The number of RPCs that completed with each status for this method.
    # Format: status code -> RPC count, f.e.:
    # {
    #   "(0, OK)": 20,
    #   "(14, UNAVAILABLE)": 10
    # }
    result: Dict[str, int]

    @functools.cached_property  # pylint: disable=no-member
    def rpcs_completed(self):
        """Returns the total count of completed RPCs across all statuses."""
        return sum(self.result.values())

    @staticmethod
    def from_response(
        method_name: str, method_stats: grpc_testing.MethodStats
    ) -> "PrettyStatsPerMethod":
        """Build pretty per-method stats from the raw stats response."""
        formatted: Dict[str, int] = {}
        for status_int, count in method_stats.result.items():
            status: Optional[grpc.StatusCode] = status_from_int(status_int)
            key = status_pretty(status) if status else "None"
            formatted[key] = count
        return PrettyStatsPerMethod(
            method=method_name,
            rpcs_started=method_stats.rpcs_started,
            result=formatted,
        )
def accumulated_stats_pretty(
    accumulated_stats: grpc_testing.LoadBalancerAccumulatedStatsResponse,
    *,
    ignore_empty: bool = False,
) -> str:
    """Pretty print LoadBalancerAccumulatedStatsResponse.

    Example:
        - method: EMPTY_CALL
          rpcs_started: 0
          result:
            (2, UNKNOWN): 20
        - method: UNARY_CALL
          rpcs_started: 31
          result:
            (0, OK): 10
            (14, UNAVAILABLE): 20
    """
    # Only look at stats_per_method, as the other fields are deprecated.
    pretty_methods = (
        PrettyStatsPerMethod.from_response(method_name, method_stats)
        for method_name, method_stats
        in accumulated_stats.stats_per_method.items()
    )
    result: List[Dict] = [
        dataclasses.asdict(pretty)
        for pretty in pretty_methods
        # Skip methods with no RPCs reported when ignore_empty is True.
        if pretty.rpcs_started or not ignore_empty
    ]
    return yaml.dump(result, sort_keys=False)
@dataclasses.dataclass(frozen=True)
class PrettyLoadBalancerStats:
    """Load balancer stats in a log-friendly form."""

    # The number of RPCs that failed to record a remote peer.
    num_failures: int

    # The number of completed RPCs for each peer.
    # Format: a dictionary from the host name (str) to the RPC count (int), f.e.
    # {"host-a": 10, "host-b": 20}
    rpcs_by_peer: "RpcsByPeer"

    # The number of completed RPCs per method per each peer.
    # Format: a dictionary from the method name to RpcsByPeer (see above), f.e.:
    # {
    #   "UNARY_CALL": {"host-a": 10, "host-b": 20},
    #   "EMPTY_CALL": {"host-a": 42},
    # }
    rpcs_by_method: Dict[str, "RpcsByPeer"]

    @staticmethod
    def _parse_rpcs_by_peer(
        rpcs_by_peer: grpc_testing.RpcsByPeer,
    ) -> "RpcsByPeer":
        # Copy the proto map into a plain dict.
        return {peer: count for peer, count in rpcs_by_peer.items()}

    @classmethod
    def from_response(
        cls, lb_stats: grpc_testing.LoadBalancerStatsResponse
    ) -> "PrettyLoadBalancerStats":
        """Build pretty LB stats from the raw stats response."""
        # Methods with empty stats are omitted.
        per_method: Dict[str, "RpcsByPeer"] = {
            method_name: cls._parse_rpcs_by_peer(stats.rpcs_by_peer)
            for method_name, stats in lb_stats.rpcs_by_method.items()
            if stats
        }
        return PrettyLoadBalancerStats(
            num_failures=lb_stats.num_failures,
            rpcs_by_peer=cls._parse_rpcs_by_peer(lb_stats.rpcs_by_peer),
            rpcs_by_method=per_method,
        )
def lb_stats_pretty(lb: grpc_testing.LoadBalancerStatsResponse) -> str:
    """Pretty print LoadBalancerStatsResponse.

    Example:
        num_failures: 13
        rpcs_by_method:
          UNARY_CALL:
            psm-grpc-server-a: 100
            psm-grpc-server-b: 42
          EMPTY_CALL:
            psm-grpc-server-a: 200
        rpcs_by_peer:
          psm-grpc-server-a: 200
          psm-grpc-server-b: 42
    """
    stats_dict = dataclasses.asdict(PrettyLoadBalancerStats.from_response(lb))
    return yaml.dump(stats_dict, sort_keys=False)
| 5,869
| 32.163842
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/helpers/highlighter.py
|
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The module contains helpers to enable color output in terminals.
Use this to log resources dumped as a structured document (f.e. YAML),
and enable colorful syntax highlighting.
TODO(sergiitk): This can be used to output protobuf responses formatted as JSON.
"""
import logging
from typing import Optional
from absl import flags
import pygments
import pygments.formatter
import pygments.formatters.other
import pygments.formatters.terminal
import pygments.formatters.terminal256
import pygments.lexer
import pygments.lexers.data
import pygments.styles
# The style for terminals supporting 8/16 colors.
STYLE_ANSI_16 = "ansi16"
# Join with pygments styles for terminals supporting 88/256 colors.
ALL_COLOR_STYLES = [STYLE_ANSI_16] + list(pygments.styles.get_all_styles())
# Flags.
COLOR = flags.DEFINE_bool("color", default=True, help="Colorize the output")
COLOR_STYLE = flags.DEFINE_enum(
    "color_style",
    default="material",
    enum_values=ALL_COLOR_STYLES,
    help=(
        "Color styles for terminals supporting 256 colors. "
        f"Use {STYLE_ANSI_16} style for terminals supporting 8/16 colors"
    ),
)
logger = logging.getLogger(__name__)
# Type aliases.
# Short names for the pygments classes used by Highlighter below.
Lexer = pygments.lexer.Lexer
YamlLexer = pygments.lexers.data.YamlLexer
Formatter = pygments.formatter.Formatter
NullFormatter = pygments.formatters.other.NullFormatter
TerminalFormatter = pygments.formatters.terminal.TerminalFormatter
Terminal256Formatter = pygments.formatters.terminal256.Terminal256Formatter
class Highlighter:
    """Applies pygments syntax highlighting to a code string.

    The formatter is chosen from the color settings: no color at all,
    8/16-color ANSI, or a 88/256-color pygments style.
    """

    formatter: Formatter
    lexer: Lexer
    color: bool
    color_style: Optional[str] = None

    def __init__(
        self,
        *,
        lexer: Lexer,
        color: Optional[bool] = None,
        color_style: Optional[str] = None,
    ):
        self.lexer = lexer
        # Fall back to the --color flag when not set explicitly.
        self.color = COLOR.value if color is None else color
        if not self.color:
            # Highlighting disabled: pass-through formatter.
            self.formatter = NullFormatter()
            return
        if not color_style:
            color_style = COLOR_STYLE.value
        if color_style not in ALL_COLOR_STYLES:
            raise ValueError(
                f"Unrecognized color style {color_style}, "
                f"valid styles: {ALL_COLOR_STYLES}"
            )
        if color_style == STYLE_ANSI_16:
            # 8/16 colors support only.
            self.formatter = TerminalFormatter()
        else:
            # 88/256 colors.
            self.formatter = Terminal256Formatter(style=color_style)

    def highlight(self, code: str) -> str:
        """Return the code highlighted with the configured formatter."""
        return pygments.highlight(code, self.lexer, self.formatter)
class HighlighterYaml(Highlighter):
    """Highlighter pre-configured with a UTF-8 YAML lexer."""

    def __init__(
        self, *, color: Optional[bool] = None, color_style: Optional[str] = None
    ):
        super().__init__(
            color=color,
            color_style=color_style,
            lexer=YamlLexer(encoding="utf-8"),
        )
| 3,486
| 31.588785
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/helpers/__init__.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 577
| 40.285714
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/helpers/logs.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The module contains helpers to initialize and configure logging."""
import functools
import pathlib
from absl import flags
from absl import logging
def _ensure_flags_parsed() -> None:
    """Raise when absl flags haven't been parsed yet."""
    if flags.FLAGS.is_parsed():
        return
    raise flags.UnparsedFlagAccessError("Must initialize absl flags first.")
@functools.lru_cache(None)
def log_get_root_dir() -> pathlib.Path:
    """Return the absolute absl log root directory (cached after first call)."""
    _ensure_flags_parsed()
    root = pathlib.Path(logging.find_log_dir()).absolute()
    logging.info("Log root dir: %s", root)
    return root
def log_dir_mkdir(name: str) -> pathlib.Path:
    """Creates and returns a subdir with the given name in the log folder.

    Args:
        name: A single path component, f.e. "my-test-run". Must not contain
            path separators or "..".

    Raises:
        ValueError: When the name isn't a single path component, or tries
            to escape above the log root.
    """
    if len(pathlib.Path(name).parts) != 1:
        raise ValueError(f"Dir name must be a single component; got: {name}")
    # ".." is itself a single component, so check it separately.
    # Fixed lint F541: the message was an f-string with no placeholders;
    # now it includes the offending name for debuggability.
    if ".." in name:
        raise ValueError(
            f"Dir name must not be above the log root; got: {name}"
        )
    log_subdir = log_get_root_dir() / name
    if log_subdir.exists() and log_subdir.is_dir():
        logging.debug("Using existing log subdir: %s", log_subdir)
    else:
        log_subdir.mkdir()
        logging.debug("Created log subdir: %s", log_subdir)
    return log_subdir
| 1,730
| 34.326531
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/test_app/client_app.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides an interface to xDS Test Client running remotely.
"""
import datetime
import functools
import logging
from typing import Iterable, List, Optional
from framework.helpers import retryers
import framework.rpc
from framework.rpc import grpc_channelz
from framework.rpc import grpc_csds
from framework.rpc import grpc_testing
logger = logging.getLogger(__name__)
# Type aliases
# Private short names for the framework.rpc types used throughout the module.
_timedelta = datetime.timedelta
_LoadBalancerStatsServiceClient = grpc_testing.LoadBalancerStatsServiceClient
_XdsUpdateClientConfigureServiceClient = (
    grpc_testing.XdsUpdateClientConfigureServiceClient
)
_ChannelzServiceClient = grpc_channelz.ChannelzServiceClient
_ChannelzChannel = grpc_channelz.Channel
_ChannelzChannelState = grpc_channelz.ChannelState
_ChannelzSubchannel = grpc_channelz.Subchannel
_ChannelzSocket = grpc_channelz.Socket
_CsdsClient = grpc_csds.CsdsClient
class XdsTestClient(framework.rpc.grpc.GrpcApp):
    """
    Represents RPC services implemented in Client component of the xds test app.
    https://github.com/grpc/grpc/blob/master/doc/xds-test-descriptions.md#client
    """

    # A unique string identifying each client replica. Used in logging.
    hostname: str

    def __init__(
        self,
        *,
        ip: str,
        rpc_port: int,
        server_target: str,
        hostname: str,
        rpc_host: Optional[str] = None,
        maintenance_port: Optional[int] = None,
    ):
        """
        Args:
            ip: IP the client app is reachable on.
            rpc_port: Port of the test services (stats, configure).
            server_target: Target URI of the server under test.
            hostname: Unique replica name, used in logging.
            rpc_host: Overrides ip as the RPC host when set.
            maintenance_port: Port of channelz/csds services; defaults to
                rpc_port when not set.
        """
        super().__init__(rpc_host=(rpc_host or ip))
        self.ip = ip
        self.rpc_port = rpc_port
        self.server_target = server_target
        self.maintenance_port = maintenance_port or rpc_port
        self.hostname = hostname

    @property
    @functools.lru_cache(None)
    def load_balancer_stats(self) -> _LoadBalancerStatsServiceClient:
        # Lazily-created, cached LoadBalancerStatsService stub.
        return _LoadBalancerStatsServiceClient(
            self._make_channel(self.rpc_port),
            log_target=f"{self.hostname}:{self.rpc_port}",
        )

    @property
    @functools.lru_cache(None)
    def update_config(self):
        # Lazily-created, cached XdsUpdateClientConfigureService stub.
        return _XdsUpdateClientConfigureServiceClient(
            self._make_channel(self.rpc_port),
            log_target=f"{self.hostname}:{self.rpc_port}",
        )

    @property
    @functools.lru_cache(None)
    def channelz(self) -> _ChannelzServiceClient:
        # Lazily-created, cached Channelz stub on the maintenance port.
        return _ChannelzServiceClient(
            self._make_channel(self.maintenance_port),
            log_target=f"{self.hostname}:{self.maintenance_port}",
        )

    @property
    @functools.lru_cache(None)
    def csds(self) -> _CsdsClient:
        # Lazily-created, cached CSDS stub on the maintenance port.
        return _CsdsClient(
            self._make_channel(self.maintenance_port),
            log_target=f"{self.hostname}:{self.maintenance_port}",
        )

    def get_load_balancer_stats(
        self,
        *,
        num_rpcs: int,
        timeout_sec: Optional[int] = None,
    ) -> grpc_testing.LoadBalancerStatsResponse:
        """
        Shortcut to LoadBalancerStatsServiceClient.get_client_stats()
        """
        return self.load_balancer_stats.get_client_stats(
            num_rpcs=num_rpcs, timeout_sec=timeout_sec
        )

    def get_load_balancer_accumulated_stats(
        self,
        *,
        timeout_sec: Optional[int] = None,
    ) -> grpc_testing.LoadBalancerAccumulatedStatsResponse:
        """Shortcut to LoadBalancerStatsServiceClient.get_client_accumulated_stats()"""
        return self.load_balancer_stats.get_client_accumulated_stats(
            timeout_sec=timeout_sec
        )

    def wait_for_active_server_channel(self) -> _ChannelzChannel:
        """Wait for the channel to the server to transition to READY.

        Raises:
            GrpcApp.NotFound: If the channel never transitioned to READY.
        """
        return self.wait_for_server_channel_state(_ChannelzChannelState.READY)

    def get_active_server_channel_socket(self) -> _ChannelzSocket:
        """Return the socket of the first subchannel of the READY channel.

        Raises:
            GrpcApp.NotFound: If there's no READY channel to the server.
        """
        channel = self.find_server_channel_with_state(
            _ChannelzChannelState.READY
        )
        # Get the first subchannel of the active channel to the server.
        logger.debug(
            (
                "[%s] Retrieving client -> server socket, "
                "channel_id: %s, subchannel: %s"
            ),
            self.hostname,
            channel.ref.channel_id,
            channel.subchannel_ref[0].name,
        )
        subchannel, *subchannels = list(
            self.channelz.list_channel_subchannels(channel)
        )
        if subchannels:
            logger.warning(
                "[%s] Unexpected subchannels: %r", self.hostname, subchannels
            )
        # Get the first socket of the subchannel
        socket, *sockets = list(
            self.channelz.list_subchannels_sockets(subchannel)
        )
        if sockets:
            # Bug fix: this previously logged `subchannels` instead of the
            # leftover `sockets` the message is about.
            logger.warning(
                "[%s] Unexpected sockets: %r", self.hostname, sockets
            )
        logger.debug(
            "[%s] Found client -> server socket: %s",
            self.hostname,
            socket.ref.name,
        )
        return socket

    def wait_for_server_channel_state(
        self,
        state: _ChannelzChannelState,
        *,
        timeout: Optional[_timedelta] = None,
        rpc_deadline: Optional[_timedelta] = None,
    ) -> _ChannelzChannel:
        """Poll channelz until a channel to the server reaches the state.

        Raises:
            retryers.RetryError: If the state isn't reached within timeout
                (default 5 minutes).
        """
        # When polling for a state, prefer smaller wait times to avoid
        # exhausting all allowed time on a single long RPC.
        if rpc_deadline is None:
            rpc_deadline = _timedelta(seconds=30)
        # Fine-tuned to wait for the channel to the server.
        retryer = retryers.exponential_retryer_with_timeout(
            wait_min=_timedelta(seconds=10),
            wait_max=_timedelta(seconds=25),
            timeout=_timedelta(minutes=5) if timeout is None else timeout,
        )
        logger.info(
            "[%s] Waiting to report a %s channel to %s",
            self.hostname,
            _ChannelzChannelState.Name(state),
            self.server_target,
        )
        channel = retryer(
            self.find_server_channel_with_state,
            state,
            rpc_deadline=rpc_deadline,
        )
        logger.info(
            "[%s] Channel to %s transitioned to state %s: %s",
            self.hostname,
            self.server_target,
            _ChannelzChannelState.Name(state),
            _ChannelzServiceClient.channel_repr(channel),
        )
        return channel

    def find_server_channel_with_state(
        self,
        state: _ChannelzChannelState,
        *,
        rpc_deadline: Optional[_timedelta] = None,
        check_subchannel=True,
    ) -> _ChannelzChannel:
        """Find the first channel to the server in the given state.

        When check_subchannel is True, additionally requires the channel to
        have at least one subchannel in the same state.

        Raises:
            GrpcApp.NotFound: If no matching channel exists.
        """
        rpc_params = {}
        if rpc_deadline is not None:
            rpc_params["deadline_sec"] = rpc_deadline.total_seconds()
        for channel in self.get_server_channels(**rpc_params):
            channel_state: _ChannelzChannelState = channel.data.state.state
            logger.info(
                "[%s] Server channel: %s",
                self.hostname,
                _ChannelzServiceClient.channel_repr(channel),
            )
            if channel_state is state:
                if check_subchannel:
                    # When requested, check if the channel has at least
                    # one subchannel in the requested state.
                    try:
                        subchannel = self.find_subchannel_with_state(
                            channel, state, **rpc_params
                        )
                        logger.info(
                            "[%s] Found subchannel in state %s: %s",
                            self.hostname,
                            _ChannelzChannelState.Name(state),
                            _ChannelzServiceClient.subchannel_repr(subchannel),
                        )
                    except self.NotFound as e:
                        # Otherwise, keep searching.
                        logger.info(e.message)
                        continue
                return channel
        raise self.NotFound(
            f"[{self.hostname}] Client has no "
            f"{_ChannelzChannelState.Name(state)} channel with the server"
        )

    def get_server_channels(self, **kwargs) -> Iterable[_ChannelzChannel]:
        """Iterate over all client channels targeting the server under test."""
        return self.channelz.find_channels_for_target(
            self.server_target, **kwargs
        )

    def find_subchannel_with_state(
        self, channel: _ChannelzChannel, state: _ChannelzChannelState, **kwargs
    ) -> _ChannelzSubchannel:
        """Return the first subchannel of the channel in the given state.

        Raises:
            GrpcApp.NotFound: If no subchannel is in the state.
        """
        subchannels = self.channelz.list_channel_subchannels(channel, **kwargs)
        for subchannel in subchannels:
            if subchannel.data.state.state is state:
                return subchannel
        raise self.NotFound(
            f"[{self.hostname}] Not found "
            f"a {_ChannelzChannelState.Name(state)} subchannel "
            f"for channel_id {channel.ref.channel_id}"
        )

    def find_subchannels_with_state(
        self, state: _ChannelzChannelState, **kwargs
    ) -> List[_ChannelzSubchannel]:
        """Collect all subchannels (across all server channels) in the state."""
        subchannels = []
        for channel in self.channelz.find_channels_for_target(
            self.server_target, **kwargs
        ):
            for subchannel in self.channelz.list_channel_subchannels(
                channel, **kwargs
            ):
                if subchannel.data.state.state is state:
                    subchannels.append(subchannel)
        return subchannels
| 9,953
| 34.173145
| 87
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/test_app/server_app.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides an interface to xDS Test Server running remotely.
"""
import functools
import logging
from typing import Iterator, Optional
import framework.rpc
from framework.rpc import grpc_channelz
from framework.rpc import grpc_testing
logger = logging.getLogger(__name__)
# Type aliases
# Private short names for the framework.rpc types used throughout the module.
_ChannelzServiceClient = grpc_channelz.ChannelzServiceClient
_XdsUpdateHealthServiceClient = grpc_testing.XdsUpdateHealthServiceClient
_HealthClient = grpc_testing.HealthClient
class XdsTestServer(framework.rpc.grpc.GrpcApp):
    """
    Represents RPC services implemented in Server component of the xDS test app.
    https://github.com/grpc/grpc/blob/master/doc/xds-test-descriptions.md#server
    """

    # A unique host name identifying each server replica.
    # Server implementation must return this in the SimpleResponse.hostname,
    # which client uses as the key in rpcs_by_peer map.
    hostname: str

    def __init__(
        self,
        *,
        ip: str,
        rpc_port: int,
        hostname: str,
        maintenance_port: Optional[int] = None,
        secure_mode: Optional[bool] = False,
        xds_host: Optional[str] = None,
        xds_port: Optional[int] = None,
        rpc_host: Optional[str] = None,
    ):
        """Initialize the remote test server handle.

        Args:
            ip: Pod IP of the server; used as the RPC host unless rpc_host set.
            rpc_port: Port serving the TestService under test.
            hostname: Unique replica host name (see class attribute).
            maintenance_port: Port serving channelz/health/admin services;
                defaults to rpc_port when not provided.
            secure_mode: Whether the server runs in xDS security mode.
            xds_host: Host part of the server's xDS address, if any.
            xds_port: Port part of the server's xDS address, if any.
            rpc_host: Overrides ip as the host to dial for RPCs.
        """
        super().__init__(rpc_host=(rpc_host or ip))
        self.ip = ip
        self.rpc_port = rpc_port
        self.hostname = hostname
        # Maintenance services share rpc_port unless a dedicated port given.
        self.maintenance_port = maintenance_port or rpc_port
        self.secure_mode = secure_mode
        self.xds_host, self.xds_port = xds_host, xds_port

    # NOTE: functools.cached_property replaces the previous
    # @property + @functools.lru_cache(None) combination, which cached on the
    # class-level function keyed by `self` and therefore kept every
    # XdsTestServer instance alive for the process lifetime (flake8-bugbear
    # B019). cached_property stores the value on the instance instead.
    @functools.cached_property
    def channelz(self) -> _ChannelzServiceClient:
        """Channelz service client connected to the maintenance port."""
        return _ChannelzServiceClient(
            self._make_channel(self.maintenance_port),
            log_target=f"{self.hostname}:{self.maintenance_port}",
        )

    @functools.cached_property
    def update_health_service_client(self) -> _XdsUpdateHealthServiceClient:
        """XdsUpdateHealthService client connected to the maintenance port."""
        return _XdsUpdateHealthServiceClient(
            self._make_channel(self.maintenance_port),
            log_target=f"{self.hostname}:{self.maintenance_port}",
        )

    @functools.cached_property
    def health_client(self) -> _HealthClient:
        """Health checking service client connected to the maintenance port."""
        return _HealthClient(
            self._make_channel(self.maintenance_port),
            log_target=f"{self.hostname}:{self.maintenance_port}",
        )

    def set_serving(self):
        """Switch server health status to SERVING and log the result."""
        logger.info("[%s] >> Setting health status to SERVING", self.hostname)
        self.update_health_service_client.set_serving()
        logger.info(
            "[%s] << Health status %s",
            self.hostname,
            self.health_client.check_health(),
        )

    def set_not_serving(self):
        """Switch server health status to NOT_SERVING and log the result."""
        logger.info(
            "[%s] >> Setting health status to NOT_SERVING", self.hostname
        )
        self.update_health_service_client.set_not_serving()
        logger.info(
            "[%s] << Health status %s",
            self.hostname,
            self.health_client.check_health(),
        )

    def set_xds_address(self, xds_host, xds_port: Optional[int] = None):
        """Set the xDS control plane address advertised by this server."""
        self.xds_host, self.xds_port = xds_host, xds_port

    @property
    def xds_address(self) -> str:
        """The xDS address as "host[:port]"; empty string when no host set."""
        if not self.xds_host:
            return ""
        if not self.xds_port:
            return self.xds_host
        return f"{self.xds_host}:{self.xds_port}"

    @property
    def xds_uri(self) -> str:
        """The xDS target URI, "xds:///host[:port]"; empty when no host."""
        if not self.xds_host:
            return ""
        return f"xds:///{self.xds_address}"

    def get_test_server(self) -> grpc_channelz.Server:
        """Return channelz representation of a server running TestService.

        Raises:
            GrpcApp.NotFound: Test server not found.
        """
        server = self.channelz.find_server_listening_on_port(self.rpc_port)
        if not server:
            # Fixed: the two adjacent f-strings previously concatenated to
            # "Serverlistening" (missing separating space).
            raise self.NotFound(
                f"[{self.hostname}] Server "
                f"listening on port {self.rpc_port} not found"
            )
        return server

    def get_test_server_sockets(self) -> Iterator[grpc_channelz.Socket]:
        """List all sockets of the test server.

        Raises:
            GrpcApp.NotFound: Test server not found.
        """
        server = self.get_test_server()
        return self.channelz.list_server_sockets(server)

    def get_server_socket_matching_client(
        self, client_socket: grpc_channelz.Socket
    ):
        """Find test server socket that matches given test client socket.

        Sockets are matched using TCP endpoints (ip:port), further on "address".
        Server socket remote address matched with client socket local address.

        Raises:
            GrpcApp.NotFound: Server socket matching client socket not found.
        """
        client_local = self.channelz.sock_address_to_str(client_socket.local)
        logger.debug(
            "[%s] Looking for a server socket connected to the client %s",
            self.hostname,
            client_local,
        )
        server_socket = self.channelz.find_server_socket_matching_client(
            self.get_test_server_sockets(), client_socket
        )
        if not server_socket:
            raise self.NotFound(
                f"[{self.hostname}] Socket to client {client_local} not found"
            )
        logger.info(
            "[%s] Found matching socket pair: server(%s) <-> client(%s)",
            self.hostname,
            self.channelz.sock_addresses_pretty(server_socket),
            self.channelz.sock_addresses_pretty(client_socket),
        )
        return server_socket
| 6,129
| 32.867403
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/test_app/__init__.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 577
| 40.285714
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/test_app/runners/__init__.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 577
| 40.285714
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/test_app/runners/base_runner.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common functionality for running xDS Test Client and Server remotely.
"""
from abc import ABCMeta
from abc import abstractmethod
import functools
import pathlib
import threading
from typing import Dict, Optional
import urllib.parse
from absl import flags
from framework import xds_flags
from framework.helpers import logs
flags.adopt_module_key_flags(logs)
_LOGS_SUBDIR = "test_app_logs"
class RunnerError(Exception):
    """Raised when an xDS Test App fails to run remotely."""
class BaseRunner(metaclass=ABCMeta):
    """Abstract base for running xDS test apps (client/server) remotely.

    Provides optional app log collection plumbing and helpers for building
    GCP Logs Explorer URLs. Subclasses implement run() and cleanup().
    """

    # Populated in __init__ only when --collect_app_logs is enabled.
    _logs_subdir: Optional[pathlib.Path] = None
    _log_stop_event: Optional[threading.Event] = None

    def __init__(self):
        if xds_flags.COLLECT_APP_LOGS.value:
            self._logs_subdir = logs.log_dir_mkdir(_LOGS_SUBDIR)
            self._log_stop_event = threading.Event()

    # NOTE: functools.cached_property replaces the previous
    # @property + @functools.lru_cache(None) combination, which cached on the
    # class-level function keyed by `self` and therefore kept every runner
    # instance alive for the process lifetime (flake8-bugbear B019).
    @functools.cached_property
    def should_collect_logs(self) -> bool:
        """True when app log collection is enabled."""
        return self._logs_subdir is not None

    @functools.cached_property
    def logs_subdir(self) -> pathlib.Path:
        """Directory collected logs are written to.

        Raises:
            FileNotFoundError: Log collection is not enabled.
        """
        if not self.should_collect_logs:
            raise FileNotFoundError("Log collection is not enabled.")
        return self._logs_subdir

    @property
    def log_stop_event(self) -> threading.Event:
        """Event used to signal log collectors to stop.

        Raises:
            ValueError: Log collection is not enabled.
        """
        if not self.should_collect_logs:
            raise ValueError("Log collection is not enabled.")
        return self._log_stop_event

    def maybe_stop_logging(self):
        """Signal log collectors to stop, if enabled and not already set."""
        if self.should_collect_logs and not self.log_stop_event.is_set():
            self.log_stop_event.set()

    @abstractmethod
    def run(self, **kwargs):
        pass

    @abstractmethod
    def cleanup(self, *, force=False):
        pass

    @classmethod
    def _logs_explorer_link_from_params(
        cls,
        *,
        gcp_ui_url: str,
        gcp_project: str,
        query: Dict[str, str],
        request: Optional[Dict[str, str]] = None,
    ) -> str:
        """Build a GCP Logs Explorer URL for the given query/request params."""
        req_merged = {"query": cls._logs_explorer_query(query)}
        if request is not None:
            req_merged.update(request)
        req = cls._logs_explorer_request(req_merged)
        return f"https://{gcp_ui_url}/logs/query;{req}?project={gcp_project}"

    @classmethod
    def _logs_explorer_query(cls, query: Dict[str, str]) -> str:
        """Render a Logs Explorer query: one `key="value"` filter per line."""
        return "\n".join(f'{k}="{v}"' for k, v in query.items())

    @classmethod
    def _logs_explorer_request(cls, req: Dict[str, str]) -> str:
        """Encode request params into the `k=v;k2=v2` URL path segment."""
        return ";".join(
            f"{k}={cls._logs_explorer_quote(v)}" for k, v in req.items()
        )

    @classmethod
    def _logs_explorer_quote(cls, value: str) -> str:
        # ':' is kept literal so time ranges stay human-readable in the URL.
        return urllib.parse.quote_plus(value, safe=":")
| 3,205
| 29.245283
| 77
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/test_app/runners/k8s/k8s_base_runner.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common functionality for running xDS Test Client and Server on Kubernetes.
"""
from abc import ABCMeta
import contextlib
import dataclasses
import datetime
import logging
import pathlib
from typing import List, Optional
import mako.template
import yaml
from framework.helpers import retryers
import framework.helpers.datetime
import framework.helpers.highlighter
import framework.helpers.rand
from framework.infrastructure import gcp
from framework.infrastructure import k8s
from framework.test_app.runners import base_runner
logger = logging.getLogger(__name__)
# Type aliases
_RunnerError = base_runner.RunnerError  # failure type raised by runners
_HighlighterYaml = framework.helpers.highlighter.HighlighterYaml  # YAML syntax highlighter
_helper_datetime = framework.helpers.datetime  # datetime helper module
_datetime = datetime.datetime  # shorthand for the datetime class
_timedelta = datetime.timedelta  # shorthand for the timedelta class
@dataclasses.dataclass(frozen=True)
class RunHistory:
    """Immutable record of one deployment run performed by a runner."""

    # Unique id distinguishing this deployment run in the namespace.
    deployment_id: str
    # When the runner requested the deployment start.
    time_start_requested: _datetime
    # When startup completed; None if the run never reached started state.
    time_start_completed: Optional[_datetime]
    # When the deployment was stopped.
    time_stopped: _datetime
class KubernetesBaseRunner(base_runner.BaseRunner, metaclass=ABCMeta):
    """Common base for runners that manage xDS test apps on Kubernetes.

    Handles the lifecycle of namespaces, deployments, services, and service
    accounts; pod log collection and port forwarding; and building GCP Logs
    Explorer links for the deployed workloads.
    """

    # Pylint wants abstract classes to override abstract methods.
    # pylint: disable=abstract-method
    TEMPLATE_DIR_NAME = "kubernetes-manifests"
    TEMPLATE_DIR_RELATIVE_PATH = f"../../../../{TEMPLATE_DIR_NAME}"
    ROLE_WORKLOAD_IDENTITY_USER = "roles/iam.workloadIdentityUser"
    pod_port_forwarders: List[k8s.PortForwarder]
    pod_log_collectors: List[k8s.PodLogCollector]
    # Required fields.
    k8s_namespace: k8s.KubernetesNamespace
    deployment_name: str
    image_name: str
    gcp_project: str
    gcp_service_account: str
    gcp_ui_url: str
    # Fields with default values.
    namespace_template: str = "namespace.yaml"
    reuse_namespace: bool = False
    # Mutable state. Describes the current run.
    namespace: Optional[k8s.V1Namespace] = None
    deployment: Optional[k8s.V1Deployment] = None
    deployment_id: Optional[str] = None
    service_account: Optional[k8s.V1ServiceAccount] = None
    time_start_requested: Optional[_datetime] = None
    time_start_completed: Optional[_datetime] = None
    time_stopped: Optional[_datetime] = None
    # The history of all runs performed by this runner.
    run_history: List[RunHistory]
    def __init__(
        self,
        k8s_namespace: k8s.KubernetesNamespace,
        *,
        deployment_name: str,
        image_name: str,
        gcp_project: str,
        gcp_service_account: str,
        gcp_ui_url: str,
        namespace_template: Optional[str] = "namespace.yaml",
        reuse_namespace: bool = False,
    ):
        """Initialize runner settings; no k8s resources are created here."""
        super().__init__()
        # Required fields.
        self.deployment_name = deployment_name
        self.image_name = image_name
        self.gcp_project = gcp_project
        # Maps GCP service account to Kubernetes service account
        self.gcp_service_account = gcp_service_account
        self.gcp_ui_url = gcp_ui_url
        # Kubernetes namespace resources manager.
        self.k8s_namespace = k8s_namespace
        if namespace_template:
            self.namespace_template = namespace_template
        self.reuse_namespace = reuse_namespace
        # Mutable state
        self.run_history = []
        self.pod_port_forwarders = []
        self.pod_log_collectors = []
        # Highlighter.
        self._highlighter = _HighlighterYaml()
    def run(self, **kwargs):
        """Begin a new run: reset state and create (or reuse) the namespace.

        Raises:
            RuntimeError: A previous run was started (or requested) and
                never stopped; double-start is not allowed.
        """
        del kwargs
        # Guard against double-start: a previous start was requested and
        # the runner has not been stopped since.
        if not self.time_stopped and self.time_start_requested:
            if self.time_start_completed:
                raise RuntimeError(
                    f"Deployment {self.deployment_name}: has already been"
                    f" started at {self.time_start_completed.isoformat()}"
                )
            else:
                raise RuntimeError(
                    f"Deployment {self.deployment_name}: start has already been"
                    f" requested at {self.time_start_requested.isoformat()}"
                )
        self._reset_state()
        self.time_start_requested = _datetime.now()
        self.logs_explorer_link()
        if self.reuse_namespace:
            self.namespace = self._reuse_namespace()
        if not self.namespace:
            self.namespace = self._create_namespace(
                self.namespace_template, namespace_name=self.k8s_namespace.name
            )
    def _start_completed(self):
        """Record the moment the deployment finished starting."""
        self.time_start_completed = _datetime.now()
    def _stop(self):
        """Record stop time and append this run to run_history."""
        self.time_stopped = _datetime.now()
        if self.time_start_requested and self.deployment_id:
            run_history = RunHistory(
                deployment_id=self.deployment_id,
                time_start_requested=self.time_start_requested,
                time_start_completed=self.time_start_completed,
                time_stopped=self.time_stopped,
            )
            self.run_history.append(run_history)
    def _reset_state(self):
        """Reset the mutable state of the previous run."""
        if self.pod_port_forwarders:
            logger.warning(
                "Port forwarders weren't cleaned up from the past run: %s",
                len(self.pod_port_forwarders),
            )
        if self.pod_log_collectors:
            logger.warning(
                "Pod log collectors weren't cleaned up from the past run: %s",
                len(self.pod_log_collectors),
            )
        self.namespace = None
        self.deployment = None
        self.deployment_id = None
        self.service_account = None
        self.time_start_requested = None
        self.time_start_completed = None
        self.time_stopped = None
        self.pod_port_forwarders = []
        self.pod_log_collectors = []
    def _cleanup_namespace(self, *, force=False):
        """Delete the namespace unless it is shared (reuse_namespace)."""
        if (self.namespace and not self.reuse_namespace) or force:
            self.delete_namespace()
            self.namespace = None
    def stop_pod_dependencies(self, *, log_drain_sec: int = 0):
        """Stop port forwarders and log collectors attached to pods.

        Args:
            log_drain_sec: Seconds to wait for log collectors to drain
                buffered output before flushing.
        """
        # Signal to stop logging early so less drain time needed.
        self.maybe_stop_logging()
        # Stop port forwarders if any.
        for pod_port_forwarder in self.pod_port_forwarders:
            pod_port_forwarder.close()
        self.pod_port_forwarders = []
        for pod_log_collector in self.pod_log_collectors:
            if log_drain_sec > 0 and not pod_log_collector.drain_event.is_set():
                logger.info(
                    "Draining logs for %s, timeout %i sec",
                    pod_log_collector.pod_name,
                    log_drain_sec,
                )
                # The close will happen normally at the next message.
                pod_log_collector.drain_event.wait(timeout=log_drain_sec)
            # Note this will be called from the main thread and may cause
            # a race for the log file. Still, at least it'll flush the buffers.
            pod_log_collector.flush()
        self.pod_log_collectors = []
    def get_pod_restarts(self, deployment: k8s.V1Deployment) -> int:
        """Return total container restart count across the deployment's pods."""
        if not self.k8s_namespace or not deployment:
            return 0
        total_restart: int = 0
        pods: List[k8s.V1Pod] = self.k8s_namespace.list_deployment_pods(
            deployment
        )
        for pod in pods:
            total_restart += sum(
                status.restart_count for status in pod.status.container_statuses
            )
        return total_restart
    @classmethod
    def _render_template(cls, template_file, **kwargs):
        """Render a mako template file with the given variables."""
        template = mako.template.Template(filename=str(template_file))
        return template.render(**kwargs)
    @classmethod
    def _manifests_from_yaml_file(cls, yaml_file):
        """Yield parsed manifests from a multi-document YAML file."""
        with open(yaml_file) as f:
            with contextlib.closing(yaml.safe_load_all(f)) as yml:
                for manifest in yml:
                    yield manifest
    @classmethod
    def _manifests_from_str(cls, document):
        """Yield parsed manifests from a multi-document YAML string."""
        with contextlib.closing(yaml.safe_load_all(document)) as yml:
            for manifest in yml:
                yield manifest
    @classmethod
    def _template_file_from_name(cls, template_name):
        """Resolve a template name to its path in the manifests directory."""
        templates_path = (
            pathlib.Path(__file__).parent / cls.TEMPLATE_DIR_RELATIVE_PATH
        )
        return templates_path.joinpath(template_name).resolve()
    def _create_from_template(self, template_name, **kwargs) -> object:
        """Render a single-document template and create the k8s resource.

        Raises:
            RunnerError: The template produced more or fewer than one
                manifest, or creation returned more than one object.
        """
        template_file = self._template_file_from_name(template_name)
        logger.debug("Loading k8s manifest template: %s", template_file)
        yaml_doc = self._render_template(template_file, **kwargs)
        logger.info(
            "Rendered template %s/%s:\n%s",
            self.TEMPLATE_DIR_NAME,
            template_name,
            self._highlighter.highlight(yaml_doc),
        )
        manifests = self._manifests_from_str(yaml_doc)
        manifest = next(manifests)
        # Error out on multi-document yaml
        if next(manifests, False):
            raise _RunnerError(
                f"Exactly one document expected in manifest {template_file}"
            )
        k8s_objects = self.k8s_namespace.create_single_resource(manifest)
        if len(k8s_objects) != 1:
            raise _RunnerError(
                "Expected exactly one object must created from "
                f"manifest {template_file}"
            )
        logger.info(
            "%s %s created", k8s_objects[0].kind, k8s_objects[0].metadata.name
        )
        return k8s_objects[0]
    def _reuse_deployment(self, deployment_name) -> k8s.V1Deployment:
        """Fetch an existing deployment instead of creating a new one."""
        deployment = self.k8s_namespace.get_deployment(deployment_name)
        # TODO(sergiitk): check if good or must be recreated
        return deployment
    def _reuse_service(self, service_name) -> k8s.V1Service:
        """Fetch an existing service instead of creating a new one."""
        service = self.k8s_namespace.get_service(service_name)
        # TODO(sergiitk): check if good or must be recreated
        return service
    def _reuse_namespace(self) -> k8s.V1Namespace:
        """Fetch the existing namespace instead of creating a new one."""
        return self.k8s_namespace.get()
    def _create_namespace(self, template, **kwargs) -> k8s.V1Namespace:
        """Create a namespace from a template; validate type and name.

        Raises:
            RunnerError: The created object is not a V1Namespace or has
                an unexpected name.
        """
        namespace = self._create_from_template(template, **kwargs)
        if not isinstance(namespace, k8s.V1Namespace):
            raise _RunnerError(
                f"Expected V1Namespace to be created from manifest {template}"
            )
        if namespace.metadata.name != kwargs["namespace_name"]:
            raise _RunnerError(
                "V1Namespace created with unexpected name: "
                f"{namespace.metadata.name}"
            )
        logger.debug(
            "V1Namespace %s created at %s",
            namespace.metadata.self_link,
            namespace.metadata.creation_timestamp,
        )
        return namespace
    @classmethod
    def _get_workload_identity_member_name(
        cls, project, namespace_name, service_account_name
    ):
        """
        Returns workload identity member name used to authenticate Kubernetes
        service accounts.
        https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity
        """
        return (
            f"serviceAccount:{project}.svc.id.goog"
            f"[{namespace_name}/{service_account_name}]"
        )
    def _grant_workload_identity_user(
        self, *, gcp_iam, gcp_service_account, service_account_name
    ):
        """Grant the k8s service account permission to impersonate the GCP SA."""
        workload_identity_member = self._get_workload_identity_member_name(
            gcp_iam.project, self.k8s_namespace.name, service_account_name
        )
        logger.info(
            "Granting %s to %s for GCP Service Account %s",
            self.ROLE_WORKLOAD_IDENTITY_USER,
            workload_identity_member,
            gcp_service_account,
        )
        gcp_iam.add_service_account_iam_policy_binding(
            gcp_service_account,
            self.ROLE_WORKLOAD_IDENTITY_USER,
            workload_identity_member,
        )
    def _revoke_workload_identity_user(
        self, *, gcp_iam, gcp_service_account, service_account_name
    ):
        """Revoke the workload identity binding; failures are logged, not raised."""
        workload_identity_member = self._get_workload_identity_member_name(
            gcp_iam.project, self.k8s_namespace.name, service_account_name
        )
        logger.info(
            "Revoking %s from %s for GCP Service Account %s",
            self.ROLE_WORKLOAD_IDENTITY_USER,
            workload_identity_member,
            gcp_service_account,
        )
        try:
            gcp_iam.remove_service_account_iam_policy_binding(
                gcp_service_account,
                self.ROLE_WORKLOAD_IDENTITY_USER,
                workload_identity_member,
            )
        except gcp.api.Error as error:
            # Best-effort cleanup: log and continue so teardown proceeds.
            logger.warning(
                "Failed %s from %s for Service Account %s: %r",
                self.ROLE_WORKLOAD_IDENTITY_USER,
                workload_identity_member,
                gcp_service_account,
                error,
            )
    def _create_service_account(
        self, template, **kwargs
    ) -> k8s.V1ServiceAccount:
        """Create a k8s service account from template; validate type and name.

        Raises:
            RunnerError: Created object is not a V1ServiceAccount or has
                an unexpected name.
        """
        resource = self._create_from_template(template, **kwargs)
        if not isinstance(resource, k8s.V1ServiceAccount):
            raise _RunnerError(
                "Expected V1ServiceAccount to be created "
                f"from manifest {template}"
            )
        if resource.metadata.name != kwargs["service_account_name"]:
            raise _RunnerError(
                "V1ServiceAccount created with unexpected name: "
                f"{resource.metadata.name}"
            )
        logger.debug(
            "V1ServiceAccount %s created at %s",
            resource.metadata.self_link,
            resource.metadata.creation_timestamp,
        )
        return resource
    def _create_deployment(self, template, **kwargs) -> k8s.V1Deployment:
        """Create a deployment from template, assigning a unique deployment_id.

        Raises:
            TypeError: Required kwarg deployment_name missing.
            RunnerError: Created object is not a V1Deployment or has
                an unexpected name.
        """
        # Not making deployment_name an explicit kwarg to be consistent with
        # the rest of the _create_* methods, which pass kwargs as-is
        # to _create_from_template(), so that the kwargs dict is unpacked into
        # template variables and their values.
        if "deployment_name" not in kwargs:
            raise TypeError(
                "Missing required keyword-only argument: deployment_name"
            )
        # Automatically apply random deployment_id to use in the matchLabels
        # to prevent selecting pods in the same namespace belonging to
        # a different deployment.
        if "deployment_id" not in kwargs:
            rand_id: str = framework.helpers.rand.rand_string(lowercase=True)
            # Fun edge case: when rand_string() happen to generate numbers only,
            # yaml interprets deployment_id label value as an integer,
            # but k8s expects label values to be strings. Lol. K8s responds
            # with a barely readable 400 Bad Request error: 'ReadString: expects
            # \" or n, but found 9, error found in #10 byte of ...|ent_id'.
            # Prepending deployment name forces deployment_id into a string,
            # as well as it's just a better description.
            self.deployment_id = f'{kwargs["deployment_name"]}-{rand_id}'
            kwargs["deployment_id"] = self.deployment_id
        else:
            self.deployment_id = kwargs["deployment_id"]
        deployment = self._create_from_template(template, **kwargs)
        if not isinstance(deployment, k8s.V1Deployment):
            raise _RunnerError(
                f"Expected V1Deployment to be created from manifest {template}"
            )
        if deployment.metadata.name != kwargs["deployment_name"]:
            raise _RunnerError(
                "V1Deployment created with unexpected name: "
                f"{deployment.metadata.name}"
            )
        logger.debug(
            "V1Deployment %s created at %s",
            deployment.metadata.self_link,
            deployment.metadata.creation_timestamp,
        )
        return deployment
    def _create_service(self, template, **kwargs) -> k8s.V1Service:
        """Create a k8s service from template; validate type and name.

        Raises:
            RunnerError: Created object is not a V1Service or has
                an unexpected name.
        """
        service = self._create_from_template(template, **kwargs)
        if not isinstance(service, k8s.V1Service):
            raise _RunnerError(
                f"Expected V1Service to be created from manifest {template}"
            )
        if service.metadata.name != kwargs["service_name"]:
            raise _RunnerError(
                "V1Service created with unexpected name: "
                f"{service.metadata.name}"
            )
        logger.debug(
            "V1Service %s created at %s",
            service.metadata.self_link,
            service.metadata.creation_timestamp,
        )
        return service
    def _delete_deployment(self, name, wait_for_deletion=True):
        """Delete a deployment; deletion failures are logged, not raised."""
        self.stop_pod_dependencies()
        logger.info("Deleting deployment %s", name)
        try:
            self.k8s_namespace.delete_deployment(name)
        except (retryers.RetryError, k8s.NotFound) as e:
            logger.info("Deployment %s deletion failed: %s", name, e)
            return
        if wait_for_deletion:
            self.k8s_namespace.wait_for_deployment_deleted(name)
        logger.debug("Deployment %s deleted", name)
    def _delete_service(self, name, wait_for_deletion=True):
        """Delete a service; deletion failures are logged, not raised."""
        logger.info("Deleting service %s", name)
        try:
            self.k8s_namespace.delete_service(name)
        except (retryers.RetryError, k8s.NotFound) as e:
            logger.info("Service %s deletion failed: %s", name, e)
            return
        if wait_for_deletion:
            self.k8s_namespace.wait_for_service_deleted(name)
        logger.debug("Service %s deleted", name)
    def _delete_service_account(self, name, wait_for_deletion=True):
        """Delete a service account; deletion failures are logged, not raised."""
        logger.info("Deleting service account %s", name)
        try:
            self.k8s_namespace.delete_service_account(name)
        except (retryers.RetryError, k8s.NotFound) as e:
            logger.info("Service account %s deletion failed: %s", name, e)
            return
        if wait_for_deletion:
            self.k8s_namespace.wait_for_service_account_deleted(name)
        logger.debug("Service account %s deleted", name)
    def delete_namespace(self, wait_for_deletion=True):
        """Delete the namespace; deletion failures are logged, not raised."""
        logger.info("Deleting namespace %s", self.k8s_namespace.name)
        try:
            self.k8s_namespace.delete()
        except (retryers.RetryError, k8s.NotFound) as e:
            logger.info(
                "Namespace %s deletion failed: %s", self.k8s_namespace.name, e
            )
            return
        if wait_for_deletion:
            self.k8s_namespace.wait_for_namespace_deleted()
        logger.debug("Namespace %s deleted", self.k8s_namespace.name)
    def _wait_deployment_with_available_replicas(self, name, count=1, **kwargs):
        """Block until the deployment reports `count` available replicas."""
        logger.info(
            "Waiting for deployment %s to report %s available replica(s)",
            name,
            count,
        )
        self.k8s_namespace.wait_for_deployment_available_replicas(
            name, count, **kwargs
        )
        deployment = self.k8s_namespace.get_deployment(name)
        logger.info(
            "Deployment %s has %i replicas available",
            deployment.metadata.name,
            deployment.status.available_replicas,
        )
    def _wait_deployment_pod_count(
        self, deployment: k8s.V1Deployment, count: int = 1, **kwargs
    ) -> List[str]:
        """Block until the deployment has `count` pods; return pod names."""
        logger.info(
            "Waiting for deployment %s to initialize %s pod(s)",
            deployment.metadata.name,
            count,
        )
        self.k8s_namespace.wait_for_deployment_replica_count(
            deployment, count, **kwargs
        )
        pods = self.k8s_namespace.list_deployment_pods(deployment)
        pod_names = [pod.metadata.name for pod in pods]
        logger.info(
            "Deployment %s initialized %i pod(s): %s",
            deployment.metadata.name,
            count,
            pod_names,
        )
        # Pods may not be started yet, just return the names.
        return pod_names
    def _wait_pod_started(self, name, **kwargs) -> k8s.V1Pod:
        """Block until the named pod is started; return its V1Pod object."""
        logger.info("Waiting for pod %s to start", name)
        self.k8s_namespace.wait_for_pod_started(name, **kwargs)
        pod = self.k8s_namespace.get_pod(name)
        logger.info(
            "Pod %s ready, IP: %s", pod.metadata.name, pod.status.pod_ip
        )
        return pod
    def _start_port_forwarding_pod(
        self, pod: k8s.V1Pod, remote_port: int
    ) -> k8s.PortForwarder:
        """Start (and track) local port forwarding to a pod's remote port."""
        logger.info(
            "LOCAL DEV MODE: Enabling port forwarding to %s:%s",
            pod.status.pod_ip,
            remote_port,
        )
        port_forwarder = self.k8s_namespace.port_forward_pod(pod, remote_port)
        self.pod_port_forwarders.append(port_forwarder)
        return port_forwarder
    def _start_logging_pod(
        self, pod: k8s.V1Pod, *, log_to_stdout: bool = False
    ) -> k8s.PodLogCollector:
        """Start (and track) a log collector streaming the pod's logs to file."""
        pod_name = pod.metadata.name
        logfile_name = f"{self.k8s_namespace.name}_{pod_name}.log"
        log_path = self.logs_subdir / logfile_name
        logger.info(
            "Enabling log collection from pod %s to %s",
            pod_name,
            log_path.relative_to(self.logs_subdir.parent.parent),
        )
        pod_log_collector = self.k8s_namespace.pod_start_logging(
            pod_name=pod_name,
            log_path=log_path,
            log_stop_event=self.log_stop_event,
            log_to_stdout=log_to_stdout,
            # Timestamps are enabled because not all language implementations
            # include them.
            # TODO(sergiitk): Make this setting language-specific.
            log_timestamps=True,
        )
        self.pod_log_collectors.append(pod_log_collector)
        return pod_log_collector
    def _wait_service_neg(self, name, service_port, **kwargs):
        """Block until the service's Network Endpoint Group is created."""
        logger.info("Waiting for NEG for service %s", name)
        self.k8s_namespace.wait_for_service_neg(name, **kwargs)
        neg_name, neg_zones = self.k8s_namespace.get_service_neg(
            name, service_port
        )
        logger.info(
            "Service %s: detected NEG=%s in zones=%s", name, neg_name, neg_zones
        )
    def logs_explorer_link(self):
        """Prints GCP Logs Explorer link to all runs of the deployment."""
        self._logs_explorer_link(
            deployment_name=self.deployment_name,
            namespace_name=self.k8s_namespace.name,
            gcp_project=self.gcp_project,
            gcp_ui_url=self.gcp_ui_url,
        )
    def logs_explorer_run_history_links(self):
        """Prints a separate GCP Logs Explorer link for each run *completed* by
        the runner.
        This excludes the current run, if it hasn't been completed.
        """
        if not self.run_history:
            logger.info("No completed deployments of %s", self.deployment_name)
            return
        for run in self.run_history:
            self._logs_explorer_link(
                deployment_name=self.deployment_name,
                namespace_name=self.k8s_namespace.name,
                gcp_project=self.gcp_project,
                gcp_ui_url=self.gcp_ui_url,
                deployment_id=run.deployment_id,
                start_time=run.time_start_requested,
                end_time=run.time_stopped,
            )
    @classmethod
    def _logs_explorer_link(
        cls,
        *,
        deployment_name: str,
        namespace_name: str,
        gcp_project: str,
        gcp_ui_url: str,
        deployment_id: Optional[str] = None,
        start_time: Optional[_datetime] = None,
        end_time: Optional[_datetime] = None,
    ):
        """Output the link to test server/client logs in GCP Logs Explorer."""
        if not start_time:
            start_time = _datetime.now()
        if not end_time:
            # Default window: 30 minutes from the start time.
            end_time = start_time + _timedelta(minutes=30)
        logs_start = _helper_datetime.iso8601_utc_time(start_time)
        logs_end = _helper_datetime.iso8601_utc_time(end_time)
        request = {"timeRange": f"{logs_start}/{logs_end}"}
        query = {
            "resource.type": "k8s_container",
            "resource.labels.project_id": gcp_project,
            "resource.labels.container_name": deployment_name,
            "resource.labels.namespace_name": namespace_name,
        }
        if deployment_id:
            query['labels."k8s-pod/deployment_id"'] = deployment_id
        link = cls._logs_explorer_link_from_params(
            gcp_ui_url=gcp_ui_url,
            gcp_project=gcp_project,
            query=query,
            request=request,
        )
        link_to = deployment_id if deployment_id else deployment_name
        # A whitespace at the end to indicate the end of the url.
        logger.info("GCP Logs Explorer link to %s:\n%s ", link_to, link)
    @classmethod
    def _make_namespace_name(
        cls, resource_prefix: str, resource_suffix: str, name: str
    ) -> str:
        """A helper to make consistent test app kubernetes namespace name
        for given resource prefix and suffix."""
        parts = [resource_prefix, name]
        # Avoid trailing dash when the suffix is empty.
        if resource_suffix:
            parts.append(resource_suffix)
        return "-".join(parts)
| 25,683
| 36.494891
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/test_app/runners/k8s/__init__.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 577
| 40.285714
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/test_app/runners/k8s/k8s_xds_client_runner.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Run xDS Test Client on Kubernetes.
"""
import logging
from typing import Optional
from framework.infrastructure import gcp
from framework.infrastructure import k8s
from framework.test_app.client_app import XdsTestClient
from framework.test_app.runners.k8s import k8s_base_runner
logger = logging.getLogger(__name__)
class KubernetesClientRunner(k8s_base_runner.KubernetesBaseRunner):
    """Runs the xDS test client as a Kubernetes deployment."""

    # Required fields.
    xds_server_uri: str
    stats_port: int
    deployment_template: str
    enable_workload_identity: bool
    debug_use_port_forwarding: bool
    td_bootstrap_image: str
    network: str
    # Optional fields.
    service_account_name: Optional[str] = None
    service_account_template: Optional[str] = None
    gcp_iam: Optional[gcp.iam.IamV1] = None
    def __init__(  # pylint: disable=too-many-locals
        self,
        k8s_namespace: k8s.KubernetesNamespace,
        *,
        deployment_name: str,
        image_name: str,
        td_bootstrap_image: str,
        network="default",
        xds_server_uri: Optional[str] = None,
        gcp_api_manager: gcp.api.GcpApiManager,
        gcp_project: str,
        gcp_service_account: str,
        service_account_name: Optional[str] = None,
        stats_port: int = 8079,
        deployment_template: str = "client.deployment.yaml",
        service_account_template: str = "service-account.yaml",
        reuse_namespace: bool = False,
        namespace_template: Optional[str] = None,
        debug_use_port_forwarding: bool = False,
        enable_workload_identity: bool = True,
    ):
        """Initialize the client runner; no k8s resources are created here."""
        super().__init__(
            k8s_namespace,
            deployment_name=deployment_name,
            image_name=image_name,
            gcp_project=gcp_project,
            gcp_service_account=gcp_service_account,
            gcp_ui_url=gcp_api_manager.gcp_ui_url,
            namespace_template=namespace_template,
            reuse_namespace=reuse_namespace,
        )
        # Settings
        self.stats_port = stats_port
        self.deployment_template = deployment_template
        self.enable_workload_identity = enable_workload_identity
        self.debug_use_port_forwarding = debug_use_port_forwarding
        # Used by the TD bootstrap generator.
        self.td_bootstrap_image = td_bootstrap_image
        self.network = network
        self.xds_server_uri = xds_server_uri
        # Workload identity settings:
        if self.enable_workload_identity:
            # Kubernetes service account.
            self.service_account_name = service_account_name or deployment_name
            self.service_account_template = service_account_template
            # GCP IAM API used to grant allow workload service accounts
            # permission to use GCP service account identity.
            self.gcp_iam = gcp.iam.IamV1(gcp_api_manager, gcp_project)
def run( # pylint: disable=arguments-differ
self,
*,
server_target,
rpc="UnaryCall",
qps=25,
metadata="",
secure_mode=False,
config_mesh=None,
print_response=False,
log_to_stdout: bool = False,
) -> XdsTestClient:
logger.info(
(
'Deploying xDS test client "%s" to k8s namespace %s: '
"server_target=%s rpc=%s qps=%s metadata=%r secure_mode=%s "
"print_response=%s"
),
self.deployment_name,
self.k8s_namespace.name,
server_target,
rpc,
qps,
metadata,
secure_mode,
print_response,
)
super().run()
if self.enable_workload_identity:
# Allow Kubernetes service account to use the GCP service account
# identity.
self._grant_workload_identity_user(
gcp_iam=self.gcp_iam,
gcp_service_account=self.gcp_service_account,
service_account_name=self.service_account_name,
)
# Create service account
self.service_account = self._create_service_account(
self.service_account_template,
service_account_name=self.service_account_name,
namespace_name=self.k8s_namespace.name,
gcp_service_account=self.gcp_service_account,
)
# Always create a new deployment
self.deployment = self._create_deployment(
self.deployment_template,
deployment_name=self.deployment_name,
image_name=self.image_name,
namespace_name=self.k8s_namespace.name,
service_account_name=self.service_account_name,
td_bootstrap_image=self.td_bootstrap_image,
xds_server_uri=self.xds_server_uri,
network=self.network,
stats_port=self.stats_port,
server_target=server_target,
rpc=rpc,
qps=qps,
metadata=metadata,
secure_mode=secure_mode,
config_mesh=config_mesh,
print_response=print_response,
)
# Load test client pod. We need only one client at the moment
pod_name = self._wait_deployment_pod_count(self.deployment)[0]
pod: k8s.V1Pod = self._wait_pod_started(pod_name)
if self.should_collect_logs:
self._start_logging_pod(pod, log_to_stdout=log_to_stdout)
# Verify the deployment reports all pods started as well.
self._wait_deployment_with_available_replicas(self.deployment_name)
self._start_completed()
return self._xds_test_client_for_pod(pod, server_target=server_target)
def _xds_test_client_for_pod(
self, pod: k8s.V1Pod, *, server_target: str
) -> XdsTestClient:
if self.debug_use_port_forwarding:
pf = self._start_port_forwarding_pod(pod, self.stats_port)
rpc_port, rpc_host = pf.local_port, pf.local_address
else:
rpc_port, rpc_host = self.stats_port, None
return XdsTestClient(
ip=pod.status.pod_ip,
rpc_port=rpc_port,
server_target=server_target,
hostname=pod.metadata.name,
rpc_host=rpc_host,
)
# pylint: disable=arguments-differ
def cleanup(self, *, force=False, force_namespace=False):
# TODO(sergiitk): rename to stop().
try:
if self.deployment or force:
self._delete_deployment(self.deployment_name)
self.deployment = None
if self.enable_workload_identity and (
self.service_account or force
):
self._revoke_workload_identity_user(
gcp_iam=self.gcp_iam,
gcp_service_account=self.gcp_service_account,
service_account_name=self.service_account_name,
)
self._delete_service_account(self.service_account_name)
self.service_account = None
self._cleanup_namespace(force=force_namespace and force)
finally:
self._stop()
# pylint: enable=arguments-differ
@classmethod
def make_namespace_name(
cls, resource_prefix: str, resource_suffix: str, name: str = "client"
) -> str:
"""A helper to make consistent XdsTestClient kubernetes namespace name
for given resource prefix and suffix.
Note: the idea is to intentionally produce different namespace name for
the test server, and the test client, as that closely mimics real-world
deployments.
"""
return cls._make_namespace_name(resource_prefix, resource_suffix, name)
| 8,270
| 35.76
| 79
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/test_app/runners/k8s/k8s_xds_server_runner.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Run xDS Test Client on Kubernetes.
"""
import logging
from typing import List, Optional
from framework.infrastructure import gcp
from framework.infrastructure import k8s
from framework.test_app.runners.k8s import k8s_base_runner
from framework.test_app.server_app import XdsTestServer
logger = logging.getLogger(__name__)
class KubernetesServerRunner(k8s_base_runner.KubernetesBaseRunner):
    """Runs the xDS test server on Kubernetes.

    Deploys the test server (one or more replicas), a k8s service exposing
    it via a GCP Network Endpoint Group, and optionally a k8s service
    account bound to a GCP service account via Workload Identity.
    """

    DEFAULT_TEST_PORT = 8080
    DEFAULT_MAINTENANCE_PORT = 8080
    DEFAULT_SECURE_MODE_MAINTENANCE_PORT = 8081
    # Required fields.
    deployment_template: str
    service_name: str
    service_template: str
    reuse_service: bool
    enable_workload_identity: bool
    debug_use_port_forwarding: bool
    gcp_neg_name: str
    td_bootstrap_image: str
    xds_server_uri: str
    network: str
    # Optional fields.
    service_account_name: Optional[str] = None
    service_account_template: Optional[str] = None
    gcp_iam: Optional[gcp.iam.IamV1] = None
    # Mutable state.
    service: Optional[k8s.V1Service] = None

    def __init__(  # pylint: disable=too-many-locals
        self,
        k8s_namespace: k8s.KubernetesNamespace,
        *,
        deployment_name: str,
        image_name: str,
        td_bootstrap_image: str,
        network: str = "default",
        xds_server_uri: Optional[str] = None,
        gcp_api_manager: gcp.api.GcpApiManager,
        gcp_project: str,
        gcp_service_account: str,
        service_account_name: Optional[str] = None,
        service_name: Optional[str] = None,
        neg_name: Optional[str] = None,
        deployment_template: str = "server.deployment.yaml",
        service_account_template: str = "service-account.yaml",
        service_template: str = "server.service.yaml",
        reuse_service: bool = False,
        reuse_namespace: bool = False,
        namespace_template: Optional[str] = None,
        debug_use_port_forwarding: bool = False,
        enable_workload_identity: bool = True,
    ):
        """Initialize the server runner.

        Args:
            k8s_namespace: Namespace the server is deployed into.
            deployment_name: Name of the server k8s deployment.
            image_name: Test server docker image.
            td_bootstrap_image: Traffic Director bootstrap generator image.
            network: GCP VPC network name.
            xds_server_uri: Optional override of the xDS server URI.
            gcp_api_manager: Shared GCP API client factory.
            gcp_project: GCP project id.
            gcp_service_account: GCP service account the workload acts as.
            service_account_name: K8s service account name; defaults to
                deployment_name when workload identity is enabled.
            service_name: K8s service name; defaults to deployment_name.
            neg_name: GCP Network Endpoint Group name; defaults to
                "<namespace>-<service_name>".
            deployment_template: Deployment manifest template file name.
            service_account_template: Service account manifest template.
            service_template: Service manifest template file name.
            reuse_service: Reuse an existing k8s service if present.
            reuse_namespace: Reuse an existing namespace instead of
                creating one.
            namespace_template: Namespace manifest template.
            debug_use_port_forwarding: Reach pods through local port
                forwarding instead of the pod IP.
            enable_workload_identity: Create the k8s service account and
                grant it permission to use the GCP service account.
        """
        super().__init__(
            k8s_namespace,
            deployment_name=deployment_name,
            image_name=image_name,
            gcp_project=gcp_project,
            gcp_service_account=gcp_service_account,
            gcp_ui_url=gcp_api_manager.gcp_ui_url,
            namespace_template=namespace_template,
            reuse_namespace=reuse_namespace,
        )
        # Settings
        self.deployment_template = deployment_template
        self.service_name = service_name or deployment_name
        self.service_template = service_template
        self.reuse_service = reuse_service
        self.enable_workload_identity = enable_workload_identity
        self.debug_use_port_forwarding = debug_use_port_forwarding
        # GCP Network Endpoint Group.
        self.gcp_neg_name = neg_name or (
            f"{self.k8s_namespace.name}-{self.service_name}"
        )
        # Used by the TD bootstrap generator.
        self.td_bootstrap_image = td_bootstrap_image
        self.network = network
        self.xds_server_uri = xds_server_uri
        # Workload identity settings:
        if self.enable_workload_identity:
            # Kubernetes service account.
            self.service_account_name = service_account_name or deployment_name
            self.service_account_template = service_account_template
            # GCP IAM API used to grant allow workload service accounts
            # permission to use GCP service account identity.
            self.gcp_iam = gcp.iam.IamV1(gcp_api_manager, gcp_project)

    def run(  # pylint: disable=arguments-differ,too-many-branches
        self,
        *,
        test_port: int = DEFAULT_TEST_PORT,
        maintenance_port: Optional[int] = None,
        secure_mode: bool = False,
        replica_count: int = 1,
        log_to_stdout: bool = False,
    ) -> List[XdsTestServer]:
        """Deploy the test server and wait for all replicas to start.

        Args:
            test_port: Port serving test RPCs.
            maintenance_port: Port serving maintenance services; defaults
                by secure_mode. Must differ from test_port in secure mode.
            secure_mode: Run server with PSM security enabled (requires
                workload identity).
            replica_count: Number of server pods.
            log_to_stdout: Mirror collected pod logs to stdout.

        Returns:
            One XdsTestServer per started pod.

        Raises:
            ValueError: secure-mode port clash, or secure mode without
                workload identity.
            TypeError: non-integer port numbers.
        """
        if not maintenance_port:
            maintenance_port = self._get_default_maintenance_port(secure_mode)
        # Implementation detail: in secure mode, maintenance ("backchannel")
        # port must be different from the test port so communication with
        # maintenance services can be reached independently of the security
        # configuration under test.
        if secure_mode and maintenance_port == test_port:
            raise ValueError(
                "port and maintenance_port must be different "
                "when running test server in secure mode"
            )
        # To avoid bugs with comparing wrong types.
        if not (
            isinstance(test_port, int) and isinstance(maintenance_port, int)
        ):
            raise TypeError("Port numbers must be integer")
        if secure_mode and not self.enable_workload_identity:
            raise ValueError("Secure mode requires Workload Identity enabled.")
        logger.info(
            (
                'Deploying xDS test server "%s" to k8s namespace %s:'
                " test_port=%s maintenance_port=%s secure_mode=%s"
                " replica_count=%s"
            ),
            self.deployment_name,
            self.k8s_namespace.name,
            test_port,
            maintenance_port,
            secure_mode,
            replica_count,
        )
        super().run()
        # Reuse existing if requested, create a new deployment when missing.
        # Useful for debugging to avoid NEG loosing relation to deleted service.
        if self.reuse_service:
            self.service = self._reuse_service(self.service_name)
        if not self.service:
            self.service = self._create_service(
                self.service_template,
                service_name=self.service_name,
                namespace_name=self.k8s_namespace.name,
                deployment_name=self.deployment_name,
                neg_name=self.gcp_neg_name,
                test_port=test_port,
            )
        self._wait_service_neg(self.service_name, test_port)
        if self.enable_workload_identity:
            # Allow Kubernetes service account to use the GCP service account
            # identity.
            self._grant_workload_identity_user(
                gcp_iam=self.gcp_iam,
                gcp_service_account=self.gcp_service_account,
                service_account_name=self.service_account_name,
            )
            # Create service account
            self.service_account = self._create_service_account(
                self.service_account_template,
                service_account_name=self.service_account_name,
                namespace_name=self.k8s_namespace.name,
                gcp_service_account=self.gcp_service_account,
            )
        # Always create a new deployment
        self.deployment = self._create_deployment(
            self.deployment_template,
            deployment_name=self.deployment_name,
            image_name=self.image_name,
            namespace_name=self.k8s_namespace.name,
            service_account_name=self.service_account_name,
            td_bootstrap_image=self.td_bootstrap_image,
            xds_server_uri=self.xds_server_uri,
            network=self.network,
            replica_count=replica_count,
            test_port=test_port,
            maintenance_port=maintenance_port,
            secure_mode=secure_mode,
        )
        pod_names = self._wait_deployment_pod_count(
            self.deployment, replica_count
        )
        pods = []
        for pod_name in pod_names:
            pod = self._wait_pod_started(pod_name)
            pods.append(pod)
            if self.should_collect_logs:
                self._start_logging_pod(pod, log_to_stdout=log_to_stdout)
        # Verify the deployment reports all pods started as well.
        self._wait_deployment_with_available_replicas(
            self.deployment_name, replica_count
        )
        self._start_completed()
        servers: List[XdsTestServer] = []
        for pod in pods:
            servers.append(
                self._xds_test_server_for_pod(
                    pod,
                    test_port=test_port,
                    maintenance_port=maintenance_port,
                    secure_mode=secure_mode,
                )
            )
        return servers

    def _get_default_maintenance_port(self, secure_mode: bool) -> int:
        """Return the default maintenance port for the given security mode."""
        if not secure_mode:
            maintenance_port = self.DEFAULT_MAINTENANCE_PORT
        else:
            maintenance_port = self.DEFAULT_SECURE_MODE_MAINTENANCE_PORT
        return maintenance_port

    def _xds_test_server_for_pod(
        self,
        pod: k8s.V1Pod,
        *,
        test_port: int = DEFAULT_TEST_PORT,
        maintenance_port: Optional[int] = None,
        secure_mode: bool = False,
    ) -> XdsTestServer:
        """Wrap a running pod into an XdsTestServer.

        When debug_use_port_forwarding is set, the maintenance channel is
        routed through a local port-forward; otherwise the pod IP is used.
        """
        if maintenance_port is None:
            maintenance_port = self._get_default_maintenance_port(secure_mode)
        if self.debug_use_port_forwarding:
            pf = self._start_port_forwarding_pod(pod, maintenance_port)
            rpc_port, rpc_host = pf.local_port, pf.local_address
        else:
            rpc_port, rpc_host = maintenance_port, None
        return XdsTestServer(
            ip=pod.status.pod_ip,
            rpc_port=test_port,
            hostname=pod.metadata.name,
            maintenance_port=rpc_port,
            secure_mode=secure_mode,
            rpc_host=rpc_host,
        )

    # pylint: disable=arguments-differ
    def cleanup(self, *, force=False, force_namespace=False):
        """Delete the deployment, service (unless reused), service account.

        Args:
            force: Delete resources by derived name even when not tracked
                by this runner instance.
            force_namespace: Together with force, also delete the namespace.
        """
        # TODO(sergiitk): rename to stop().
        try:
            if self.deployment or force:
                self._delete_deployment(self.deployment_name)
                self.deployment = None
            if (self.service and not self.reuse_service) or force:
                self._delete_service(self.service_name)
                self.service = None
            if self.enable_workload_identity and (
                self.service_account or force
            ):
                self._revoke_workload_identity_user(
                    gcp_iam=self.gcp_iam,
                    gcp_service_account=self.gcp_service_account,
                    service_account_name=self.service_account_name,
                )
                self._delete_service_account(self.service_account_name)
                self.service_account = None
            self._cleanup_namespace(force=(force_namespace and force))
        finally:
            self._stop()

    # pylint: enable=arguments-differ

    @classmethod
    def make_namespace_name(
        cls, resource_prefix: str, resource_suffix: str, name: str = "server"
    ) -> str:
        """A helper to make consistent XdsTestServer kubernetes namespace name
        for given resource prefix and suffix.
        Note: the idea is to intentionally produce different namespace name for
        the test server, and the test client, as that closely mimics real-world
        deployments.
        """
        return cls._make_namespace_name(resource_prefix, resource_suffix, name)
| 11,543
| 36.72549
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/infrastructure/traffic_director.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import random
from typing import Any, Dict, List, Optional, Set
from framework import xds_flags
from framework.infrastructure import gcp
logger = logging.getLogger(__name__)
# Type aliases
# Compute
_ComputeV1 = gcp.compute.ComputeV1
GcpResource = _ComputeV1.GcpResource
HealthCheckProtocol = _ComputeV1.HealthCheckProtocol
ZonalGcpResource = _ComputeV1.ZonalGcpResource
BackendServiceProtocol = _ComputeV1.BackendServiceProtocol
_BackendGRPC = BackendServiceProtocol.GRPC
_HealthCheckGRPC = HealthCheckProtocol.GRPC
# Network Security
_NetworkSecurityV1Beta1 = gcp.network_security.NetworkSecurityV1Beta1
ServerTlsPolicy = gcp.network_security.ServerTlsPolicy
ClientTlsPolicy = gcp.network_security.ClientTlsPolicy
AuthorizationPolicy = gcp.network_security.AuthorizationPolicy
# Network Services
_NetworkServicesV1Alpha1 = gcp.network_services.NetworkServicesV1Alpha1
_NetworkServicesV1Beta1 = gcp.network_services.NetworkServicesV1Beta1
EndpointPolicy = gcp.network_services.EndpointPolicy
GrpcRoute = gcp.network_services.GrpcRoute
Mesh = gcp.network_services.Mesh
# Testing metadata consts
TEST_AFFINITY_METADATA_KEY = "xds_md"
class TrafficDirectorManager: # pylint: disable=too-many-public-methods
compute: _ComputeV1
resource_prefix: str
resource_suffix: str
BACKEND_SERVICE_NAME = "backend-service"
ALTERNATIVE_BACKEND_SERVICE_NAME = "backend-service-alt"
AFFINITY_BACKEND_SERVICE_NAME = "backend-service-affinity"
HEALTH_CHECK_NAME = "health-check"
URL_MAP_NAME = "url-map"
ALTERNATIVE_URL_MAP_NAME = "url-map-alt"
URL_MAP_PATH_MATCHER_NAME = "path-matcher"
TARGET_PROXY_NAME = "target-proxy"
ALTERNATIVE_TARGET_PROXY_NAME = "target-proxy-alt"
FORWARDING_RULE_NAME = "forwarding-rule"
ALTERNATIVE_FORWARDING_RULE_NAME = "forwarding-rule-alt"
FIREWALL_RULE_NAME = "allow-health-checks"
    def __init__(
        self,
        gcp_api_manager: gcp.api.GcpApiManager,
        project: str,
        *,
        resource_prefix: str,
        resource_suffix: str,
        network: str = "default",
        compute_api_version: str = "v1",
    ):
        """Initialize the Traffic Director resource manager.

        Args:
            gcp_api_manager: Shared GCP API client factory.
            project: GCP project id the resources live in.
            resource_prefix: Prefix for all generated resource names.
            resource_suffix: Suffix for generated names; may be empty.
            network: GCP VPC network name.
            compute_api_version: Compute API version to use.
        """
        # API
        self.compute = _ComputeV1(
            gcp_api_manager, project, version=compute_api_version
        )
        # Settings
        self.project: str = project
        self.network: str = network
        self.resource_prefix: str = resource_prefix
        self.resource_suffix: str = resource_suffix
        # Managed resources
        self.health_check: Optional[GcpResource] = None
        self.backend_service: Optional[GcpResource] = None
        # TODO(sergiitk): remove this flag once backend service resource loaded
        self.backend_service_protocol: Optional[BackendServiceProtocol] = None
        self.url_map: Optional[GcpResource] = None
        self.alternative_url_map: Optional[GcpResource] = None
        self.firewall_rule: Optional[GcpResource] = None
        self.target_proxy: Optional[GcpResource] = None
        # TODO(sergiitk): remove this flag once target proxy resource loaded
        self.target_proxy_is_http: bool = False
        self.alternative_target_proxy: Optional[GcpResource] = None
        self.forwarding_rule: Optional[GcpResource] = None
        self.alternative_forwarding_rule: Optional[GcpResource] = None
        self.backends: Set[ZonalGcpResource] = set()
        self.alternative_backend_service: Optional[GcpResource] = None
        # TODO(sergiitk): remove this flag once backend service resource loaded
        self.alternative_backend_service_protocol: Optional[
            BackendServiceProtocol
        ] = None
        self.alternative_backends: Set[ZonalGcpResource] = set()
        self.affinity_backend_service: Optional[GcpResource] = None
        # TODO(sergiitk): remove this flag once backend service resource loaded
        self.affinity_backend_service_protocol: Optional[
            BackendServiceProtocol
        ] = None
        self.affinity_backends: Set[ZonalGcpResource] = set()
@property
def network_url(self):
return f"global/networks/{self.network}"
    def setup_for_grpc(
        self,
        service_host,
        service_port,
        *,
        backend_protocol: Optional[BackendServiceProtocol] = _BackendGRPC,
        health_check_port: Optional[int] = None,
    ):
        """Create the full TD resource chain for a gRPC service.

        Convenience wrapper: backend resources first (health check +
        backend service), then the routing chain (URL map, target proxy,
        forwarding rule).
        """
        self.setup_backend_for_grpc(
            protocol=backend_protocol, health_check_port=health_check_port
        )
        self.setup_routing_rule_map_for_grpc(service_host, service_port)
    def setup_backend_for_grpc(
        self,
        *,
        protocol: Optional[BackendServiceProtocol] = _BackendGRPC,
        health_check_port: Optional[int] = None,
    ):
        """Create the health check, then the backend service that uses it."""
        self.create_health_check(port=health_check_port)
        self.create_backend_service(protocol)
    def setup_routing_rule_map_for_grpc(self, service_host, service_port):
        """Create the routing chain: URL map -> target proxy -> forwarding rule."""
        self.create_url_map(service_host, service_port)
        self.create_target_proxy()
        self.create_forwarding_rule(service_port)
def cleanup(self, *, force=False):
# Cleanup in the reverse order of creation
self.delete_forwarding_rule(force=force)
self.delete_alternative_forwarding_rule(force=force)
self.delete_target_http_proxy(force=force)
self.delete_target_grpc_proxy(force=force)
self.delete_alternative_target_grpc_proxy(force=force)
self.delete_url_map(force=force)
self.delete_alternative_url_map(force=force)
self.delete_backend_service(force=force)
self.delete_alternative_backend_service(force=force)
self.delete_affinity_backend_service(force=force)
self.delete_health_check(force=force)
@functools.lru_cache(None)
def make_resource_name(self, name: str) -> str:
"""Make dash-separated resource name with resource prefix and suffix."""
parts = [self.resource_prefix, name]
# Avoid trailing dash when the suffix is empty.
if self.resource_suffix:
parts.append(self.resource_suffix)
return "-".join(parts)
    def create_health_check(
        self,
        *,
        protocol: Optional[HealthCheckProtocol] = _HealthCheckGRPC,
        port: Optional[int] = None,
    ):
        """Create the (single) health check managed by this instance.

        Raises:
            ValueError: a health check was already created; delete it first.
        """
        if self.health_check:
            raise ValueError(
                f"Health check {self.health_check.name} "
                "already created, delete it first"
            )
        # Normalize an explicit None back to the default gRPC protocol.
        if protocol is None:
            protocol = _HealthCheckGRPC
        name = self.make_resource_name(self.HEALTH_CHECK_NAME)
        logger.info('Creating %s Health Check "%s"', protocol.name, name)
        resource = self.compute.create_health_check(name, protocol, port=port)
        self.health_check = resource
def delete_health_check(self, force=False):
if force:
name = self.make_resource_name(self.HEALTH_CHECK_NAME)
elif self.health_check:
name = self.health_check.name
else:
return
logger.info('Deleting Health Check "%s"', name)
self.compute.delete_health_check(name)
self.health_check = None
    def create_backend_service(
        self,
        protocol: Optional[BackendServiceProtocol] = _BackendGRPC,
        subset_size: Optional[int] = None,
        affinity_header: Optional[str] = None,
        locality_lb_policies: Optional[List[dict]] = None,
        outlier_detection: Optional[dict] = None,
    ):
        """Create the main backend service attached to the health check.

        Args:
            protocol: Backend service protocol; None means gRPC.
            subset_size: Optional backend subsetting size.
            affinity_header: Optional header name for session affinity.
            locality_lb_policies: Optional locality LB policy config.
            outlier_detection: Optional outlier detection config.
        """
        if protocol is None:
            protocol = _BackendGRPC
        name = self.make_resource_name(self.BACKEND_SERVICE_NAME)
        logger.info('Creating %s Backend Service "%s"', protocol.name, name)
        resource = self.compute.create_backend_service_traffic_director(
            name,
            health_check=self.health_check,
            protocol=protocol,
            subset_size=subset_size,
            affinity_header=affinity_header,
            locality_lb_policies=locality_lb_policies,
            outlier_detection=outlier_detection,
        )
        self.backend_service = resource
        self.backend_service_protocol = protocol
def load_backend_service(self):
name = self.make_resource_name(self.BACKEND_SERVICE_NAME)
resource = self.compute.get_backend_service_traffic_director(name)
self.backend_service = resource
def delete_backend_service(self, force=False):
if force:
name = self.make_resource_name(self.BACKEND_SERVICE_NAME)
elif self.backend_service:
name = self.backend_service.name
else:
return
logger.info('Deleting Backend Service "%s"', name)
self.compute.delete_backend_service(name)
self.backend_service = None
    def backend_service_add_neg_backends(
        self, name, zones, max_rate_per_endpoint: Optional[int] = None
    ):
        """Register the NEG named `name` in each zone as a backend.

        Waits for each zonal Network Endpoint Group to load its endpoints
        before tracking it, then patches the backend service.
        """
        logger.info("Waiting for Network Endpoint Groups to load endpoints.")
        for zone in zones:
            backend = self.compute.wait_for_network_endpoint_group(name, zone)
            logger.info(
                'Loaded NEG "%s" in zone %s', backend.name, backend.zone
            )
            self.backends.add(backend)
        self.backend_service_patch_backends(max_rate_per_endpoint)
    def backend_service_remove_neg_backends(self, name, zones):
        """Unregister the NEG named `name` in each zone, then patch.

        Raises KeyError if a resolved NEG was never tracked (set.remove).
        """
        logger.info("Waiting for Network Endpoint Groups to load endpoints.")
        for zone in zones:
            backend = self.compute.wait_for_network_endpoint_group(name, zone)
            logger.info(
                'Loaded NEG "%s" in zone %s', backend.name, backend.zone
            )
            self.backends.remove(backend)
        self.backend_service_patch_backends()
def backend_service_patch_backends(
self, max_rate_per_endpoint: Optional[int] = None
):
logging.info(
"Adding backends to Backend Service %s: %r",
self.backend_service.name,
self.backends,
)
self.compute.backend_service_patch_backends(
self.backend_service, self.backends, max_rate_per_endpoint
)
def backend_service_remove_all_backends(self):
logging.info(
"Removing backends from Backend Service %s",
self.backend_service.name,
)
self.compute.backend_service_remove_all_backends(self.backend_service)
    def wait_for_backends_healthy_status(self):
        """Block until the main backend service reports all backends healthy."""
        logger.debug(
            "Waiting for Backend Service %s to report all backends healthy %r",
            self.backend_service,
            self.backends,
        )
        self.compute.wait_for_backends_healthy_status(
            self.backend_service, self.backends
        )
    def create_alternative_backend_service(
        self, protocol: Optional[BackendServiceProtocol] = _BackendGRPC
    ):
        """Create the alternative backend service (same health check).

        Args:
            protocol: Backend service protocol; None means gRPC.
        """
        if protocol is None:
            protocol = _BackendGRPC
        name = self.make_resource_name(self.ALTERNATIVE_BACKEND_SERVICE_NAME)
        logger.info(
            'Creating %s Alternative Backend Service "%s"', protocol.name, name
        )
        resource = self.compute.create_backend_service_traffic_director(
            name, health_check=self.health_check, protocol=protocol
        )
        self.alternative_backend_service = resource
        self.alternative_backend_service_protocol = protocol
def load_alternative_backend_service(self):
name = self.make_resource_name(self.ALTERNATIVE_BACKEND_SERVICE_NAME)
resource = self.compute.get_backend_service_traffic_director(name)
self.alternative_backend_service = resource
def delete_alternative_backend_service(self, force=False):
if force:
name = self.make_resource_name(
self.ALTERNATIVE_BACKEND_SERVICE_NAME
)
elif self.alternative_backend_service:
name = self.alternative_backend_service.name
else:
return
logger.info('Deleting Alternative Backend Service "%s"', name)
self.compute.delete_backend_service(name)
self.alternative_backend_service = None
    def alternative_backend_service_add_neg_backends(self, name, zones):
        """Register the NEG named `name` in each zone on the alternative
        backend service, then patch it."""
        logger.info("Waiting for Network Endpoint Groups to load endpoints.")
        for zone in zones:
            backend = self.compute.wait_for_network_endpoint_group(name, zone)
            logger.info(
                'Loaded NEG "%s" in zone %s', backend.name, backend.zone
            )
            self.alternative_backends.add(backend)
        self.alternative_backend_service_patch_backends()
def alternative_backend_service_patch_backends(self):
logging.info(
"Adding backends to Backend Service %s: %r",
self.alternative_backend_service.name,
self.alternative_backends,
)
self.compute.backend_service_patch_backends(
self.alternative_backend_service, self.alternative_backends
)
def alternative_backend_service_remove_all_backends(self):
logging.info(
"Removing backends from Backend Service %s",
self.alternative_backend_service.name,
)
self.compute.backend_service_remove_all_backends(
self.alternative_backend_service
)
    def wait_for_alternative_backends_healthy_status(self):
        """Block until the alternative backend service reports all backends
        healthy."""
        logger.debug(
            "Waiting for Backend Service %s to report all backends healthy %r",
            self.alternative_backend_service,
            self.alternative_backends,
        )
        self.compute.wait_for_backends_healthy_status(
            self.alternative_backend_service, self.alternative_backends
        )
    def create_affinity_backend_service(
        self, protocol: Optional[BackendServiceProtocol] = _BackendGRPC
    ):
        """Create the affinity backend service, keyed on the test affinity
        metadata header.

        Args:
            protocol: Backend service protocol; None means gRPC.
        """
        if protocol is None:
            protocol = _BackendGRPC
        name = self.make_resource_name(self.AFFINITY_BACKEND_SERVICE_NAME)
        logger.info(
            'Creating %s Affinity Backend Service "%s"', protocol.name, name
        )
        resource = self.compute.create_backend_service_traffic_director(
            name,
            health_check=self.health_check,
            protocol=protocol,
            affinity_header=TEST_AFFINITY_METADATA_KEY,
        )
        self.affinity_backend_service = resource
        self.affinity_backend_service_protocol = protocol
def load_affinity_backend_service(self):
name = self.make_resource_name(self.AFFINITY_BACKEND_SERVICE_NAME)
resource = self.compute.get_backend_service_traffic_director(name)
self.affinity_backend_service = resource
def delete_affinity_backend_service(self, force=False):
if force:
name = self.make_resource_name(self.AFFINITY_BACKEND_SERVICE_NAME)
elif self.affinity_backend_service:
name = self.affinity_backend_service.name
else:
return
logger.info('Deleting Affinity Backend Service "%s"', name)
self.compute.delete_backend_service(name)
self.affinity_backend_service = None
    def affinity_backend_service_add_neg_backends(self, name, zones):
        """Register the NEG named `name` in each zone on the affinity
        backend service, then patch it."""
        logger.info("Waiting for Network Endpoint Groups to load endpoints.")
        for zone in zones:
            backend = self.compute.wait_for_network_endpoint_group(name, zone)
            logger.info(
                'Loaded NEG "%s" in zone %s', backend.name, backend.zone
            )
            self.affinity_backends.add(backend)
        self.affinity_backend_service_patch_backends()
def affinity_backend_service_patch_backends(self):
logging.info(
"Adding backends to Backend Service %s: %r",
self.affinity_backend_service.name,
self.affinity_backends,
)
self.compute.backend_service_patch_backends(
self.affinity_backend_service, self.affinity_backends
)
def affinity_backend_service_remove_all_backends(self):
logging.info(
"Removing backends from Backend Service %s",
self.affinity_backend_service.name,
)
self.compute.backend_service_remove_all_backends(
self.affinity_backend_service
)
    def wait_for_affinity_backends_healthy_status(self):
        """Block until the affinity backend service reports all backends
        healthy."""
        logger.debug(
            "Waiting for Backend Service %s to report all backends healthy %r",
            self.affinity_backend_service,
            self.affinity_backends,
        )
        self.compute.wait_for_backends_healthy_status(
            self.affinity_backend_service, self.affinity_backends
        )
@staticmethod
def _generate_url_map_body(
name: str,
matcher_name: str,
src_hosts,
dst_default_backend_service: GcpResource,
dst_host_rule_match_backend_service: Optional[GcpResource] = None,
) -> Dict[str, Any]:
if dst_host_rule_match_backend_service is None:
dst_host_rule_match_backend_service = dst_default_backend_service
return {
"name": name,
"defaultService": dst_default_backend_service.url,
"hostRules": [
{
"hosts": src_hosts,
"pathMatcher": matcher_name,
}
],
"pathMatchers": [
{
"name": matcher_name,
"defaultService": dst_host_rule_match_backend_service.url,
}
],
}
    def create_url_map(self, src_host: str, src_port: int) -> GcpResource:
        """Create the main URL map routing "host:port" to the main backend
        service.

        Returns:
            The created URL map GcpResource.
        """
        src_address = f"{src_host}:{src_port}"
        name = self.make_resource_name(self.URL_MAP_NAME)
        matcher_name = self.make_resource_name(self.URL_MAP_PATH_MATCHER_NAME)
        logger.info(
            'Creating URL map "%s": %s -> %s',
            name,
            src_address,
            self.backend_service.name,
        )
        resource = self.compute.create_url_map_with_content(
            self._generate_url_map_body(
                name, matcher_name, [src_address], self.backend_service
            )
        )
        self.url_map = resource
        return resource
    def patch_url_map(
        self, src_host: str, src_port: int, backend_service: GcpResource
    ):
        """Repoint the main URL map at the given backend service."""
        src_address = f"{src_host}:{src_port}"
        name = self.make_resource_name(self.URL_MAP_NAME)
        matcher_name = self.make_resource_name(self.URL_MAP_PATH_MATCHER_NAME)
        logger.info(
            'Patching URL map "%s": %s -> %s',
            name,
            src_address,
            backend_service.name,
        )
        self.compute.patch_url_map(
            self.url_map,
            self._generate_url_map_body(
                name, matcher_name, [src_address], backend_service
            ),
        )
def create_url_map_with_content(self, url_map_body: Any) -> GcpResource:
logger.info("Creating URL map: %s", url_map_body)
resource = self.compute.create_url_map_with_content(url_map_body)
self.url_map = resource
return resource
def delete_url_map(self, force=False):
if force:
name = self.make_resource_name(self.URL_MAP_NAME)
elif self.url_map:
name = self.url_map.name
else:
return
logger.info('Deleting URL Map "%s"', name)
self.compute.delete_url_map(name)
self.url_map = None
    def create_alternative_url_map(
        self,
        src_host: str,
        src_port: int,
        backend_service: Optional[GcpResource] = None,
    ) -> GcpResource:
        """Create the alternative URL map.

        Args:
            src_host: Host part of the matched address.
            src_port: Port part of the matched address.
            backend_service: Destination backend service; defaults to the
                alternative backend service.

        Returns:
            The created URL map GcpResource.
        """
        name = self.make_resource_name(self.ALTERNATIVE_URL_MAP_NAME)
        src_address = f"{src_host}:{src_port}"
        matcher_name = self.make_resource_name(self.URL_MAP_PATH_MATCHER_NAME)
        if backend_service is None:
            backend_service = self.alternative_backend_service
        logger.info(
            'Creating alternative URL map "%s": %s -> %s',
            name,
            src_address,
            backend_service.name,
        )
        resource = self.compute.create_url_map_with_content(
            self._generate_url_map_body(
                name, matcher_name, [src_address], backend_service
            )
        )
        self.alternative_url_map = resource
        return resource
def delete_alternative_url_map(self, force=False):
if force:
name = self.make_resource_name(self.ALTERNATIVE_URL_MAP_NAME)
elif self.alternative_url_map:
name = self.alternative_url_map.name
else:
return
logger.info('Deleting alternative URL Map "%s"', name)
self.compute.delete_url_map(name)
self.url_map = None
def create_target_proxy(self):
name = self.make_resource_name(self.TARGET_PROXY_NAME)
if self.backend_service_protocol is BackendServiceProtocol.GRPC:
target_proxy_type = "GRPC"
create_proxy_fn = self.compute.create_target_grpc_proxy
self.target_proxy_is_http = False
elif self.backend_service_protocol is BackendServiceProtocol.HTTP2:
target_proxy_type = "HTTP"
create_proxy_fn = self.compute.create_target_http_proxy
self.target_proxy_is_http = True
else:
raise TypeError("Unexpected backend service protocol")
logger.info(
'Creating target %s proxy "%s" to URL map %s',
name,
target_proxy_type,
self.url_map.name,
)
self.target_proxy = create_proxy_fn(name, self.url_map)
def delete_target_grpc_proxy(self, force=False):
if force:
name = self.make_resource_name(self.TARGET_PROXY_NAME)
elif self.target_proxy:
name = self.target_proxy.name
else:
return
logger.info('Deleting Target GRPC proxy "%s"', name)
self.compute.delete_target_grpc_proxy(name)
self.target_proxy = None
self.target_proxy_is_http = False
def delete_target_http_proxy(self, force=False):
if force:
name = self.make_resource_name(self.TARGET_PROXY_NAME)
elif self.target_proxy and self.target_proxy_is_http:
name = self.target_proxy.name
else:
return
logger.info('Deleting HTTP Target proxy "%s"', name)
self.compute.delete_target_http_proxy(name)
self.target_proxy = None
self.target_proxy_is_http = False
    def create_alternative_target_proxy(self):
        """Create the alternative target gRPC proxy to the alternative URL map.

        Raises:
            TypeError: main backend service protocol is not GRPC (only gRPC
                is supported for the alternative proxy).
        """
        name = self.make_resource_name(self.ALTERNATIVE_TARGET_PROXY_NAME)
        if self.backend_service_protocol is BackendServiceProtocol.GRPC:
            logger.info(
                'Creating alternative target GRPC proxy "%s" to URL map %s',
                name,
                self.alternative_url_map.name,
            )
            self.alternative_target_proxy = (
                self.compute.create_target_grpc_proxy(
                    name, self.alternative_url_map, False
                )
            )
        else:
            raise TypeError("Unexpected backend service protocol")
def delete_alternative_target_grpc_proxy(self, force=False):
if force:
name = self.make_resource_name(self.ALTERNATIVE_TARGET_PROXY_NAME)
elif self.alternative_target_proxy:
name = self.alternative_target_proxy.name
else:
return
logger.info('Deleting alternative Target GRPC proxy "%s"', name)
self.compute.delete_target_grpc_proxy(name)
self.alternative_target_proxy = None
def find_unused_forwarding_rule_port(
self,
*,
lo: int = 1024, # To avoid confusion, skip well-known ports.
hi: int = 65535,
attempts: int = 25,
) -> int:
for _ in range(attempts):
src_port = random.randint(lo, hi)
if not self.compute.exists_forwarding_rule(src_port):
return src_port
# TODO(sergiitk): custom exception
raise RuntimeError("Couldn't find unused forwarding rule port")
def create_forwarding_rule(self, src_port: int):
name = self.make_resource_name(self.FORWARDING_RULE_NAME)
src_port = int(src_port)
logging.info(
'Creating forwarding rule "%s" in network "%s": 0.0.0.0:%s -> %s',
name,
self.network,
src_port,
self.target_proxy.url,
)
resource = self.compute.create_forwarding_rule(
name, src_port, self.target_proxy, self.network_url
)
self.forwarding_rule = resource
return resource
def delete_forwarding_rule(self, force=False):
if force:
name = self.make_resource_name(self.FORWARDING_RULE_NAME)
elif self.forwarding_rule:
name = self.forwarding_rule.name
else:
return
logger.info('Deleting Forwarding rule "%s"', name)
self.compute.delete_forwarding_rule(name)
self.forwarding_rule = None
def create_alternative_forwarding_rule(
self, src_port: int, ip_address="0.0.0.0"
):
name = self.make_resource_name(self.ALTERNATIVE_FORWARDING_RULE_NAME)
src_port = int(src_port)
logging.info(
(
'Creating alternative forwarding rule "%s" in network "%s":'
" %s:%s -> %s"
),
name,
self.network,
ip_address,
src_port,
self.alternative_target_proxy.url,
)
resource = self.compute.create_forwarding_rule(
name,
src_port,
self.alternative_target_proxy,
self.network_url,
ip_address=ip_address,
)
self.alternative_forwarding_rule = resource
return resource
def delete_alternative_forwarding_rule(self, force=False):
if force:
name = self.make_resource_name(
self.ALTERNATIVE_FORWARDING_RULE_NAME
)
elif self.alternative_forwarding_rule:
name = self.alternative_forwarding_rule.name
else:
return
logger.info('Deleting alternative Forwarding rule "%s"', name)
self.compute.delete_forwarding_rule(name)
self.alternative_forwarding_rule = None
def create_firewall_rule(self, allowed_ports: List[str]):
name = self.make_resource_name(self.FIREWALL_RULE_NAME)
logging.info(
'Creating firewall rule "%s" in network "%s" with allowed ports %s',
name,
self.network,
allowed_ports,
)
resource = self.compute.create_firewall_rule(
name,
self.network_url,
xds_flags.FIREWALL_SOURCE_RANGE.value,
allowed_ports,
)
self.firewall_rule = resource
def delete_firewall_rule(self, force=False):
"""The firewall rule won't be automatically removed."""
if force:
name = self.make_resource_name(self.FIREWALL_RULE_NAME)
elif self.firewall_rule:
name = self.firewall_rule.name
else:
return
logger.info('Deleting Firewall Rule "%s"', name)
self.compute.delete_firewall_rule(name)
self.firewall_rule = None
class TrafficDirectorAppNetManager(TrafficDirectorManager):
    """Traffic Director manager for the AppNet (Mesh/GrpcRoute) data model.

    Extends the base manager with GCP Network Services Mesh and GrpcRoute
    resources, created via the v1alpha1 API.
    """

    GRPC_ROUTE_NAME = "grpc-route"
    MESH_NAME = "mesh"
    netsvc: _NetworkServicesV1Alpha1

    def __init__(
        self,
        gcp_api_manager: gcp.api.GcpApiManager,
        project: str,
        *,
        resource_prefix: str,
        resource_suffix: Optional[str] = None,
        network: str = "default",
        compute_api_version: str = "v1",
    ):
        super().__init__(
            gcp_api_manager,
            project,
            resource_prefix=resource_prefix,
            resource_suffix=resource_suffix,
            network=network,
            compute_api_version=compute_api_version,
        )
        # API
        self.netsvc = _NetworkServicesV1Alpha1(gcp_api_manager, project)
        # Managed resources
        # TODO(gnossen) PTAL at the pylint error
        self.grpc_route: Optional[GrpcRoute] = None
        self.mesh: Optional[Mesh] = None

    def create_mesh(self) -> GcpResource:
        """Create the Mesh resource, then load and cache it in self.mesh."""
        name = self.make_resource_name(self.MESH_NAME)
        logger.info("Creating Mesh %s", name)
        body = {}
        resource = self.netsvc.create_mesh(name, body)
        self.mesh = self.netsvc.get_mesh(name)
        logger.debug("Loaded Mesh: %s", self.mesh)
        return resource

    def delete_mesh(self, force=False):
        """Delete the Mesh; with force=True, delete by derived name even
        when no Mesh is tracked. No-op when nothing was created."""
        if force:
            name = self.make_resource_name(self.MESH_NAME)
        elif self.mesh:
            name = self.mesh.name
        else:
            return
        logger.info("Deleting Mesh %s", name)
        self.netsvc.delete_mesh(name)
        self.mesh = None

    def create_grpc_route(self, src_host: str, src_port: int) -> GcpResource:
        """Create a GrpcRoute on the Mesh routing src_host:src_port to the
        backend service tracked by the base manager."""
        host = f"{src_host}:{src_port}"
        service_name = self.netsvc.resource_full_name(
            self.backend_service.name, "backendServices"
        )
        # NOTE(review): "hostnames" is plural in the GrpcRoute API — confirm
        # a bare string (rather than [host]) is accepted here.
        body = {
            "meshes": [self.mesh.url],
            "hostnames": host,
            "rules": [
                {"action": {"destinations": [{"serviceName": service_name}]}}
            ],
        }
        name = self.make_resource_name(self.GRPC_ROUTE_NAME)
        logger.info("Creating GrpcRoute %s", name)
        resource = self.netsvc.create_grpc_route(name, body)
        self.grpc_route = self.netsvc.get_grpc_route(name)
        logger.debug("Loaded GrpcRoute: %s", self.grpc_route)
        return resource

    def create_grpc_route_with_content(self, body: Any) -> GcpResource:
        """Create a GrpcRoute from a caller-provided request body."""
        name = self.make_resource_name(self.GRPC_ROUTE_NAME)
        logger.info("Creating GrpcRoute %s", name)
        resource = self.netsvc.create_grpc_route(name, body)
        self.grpc_route = self.netsvc.get_grpc_route(name)
        logger.debug("Loaded GrpcRoute: %s", self.grpc_route)
        return resource

    def delete_grpc_route(self, force=False):
        """Delete the GrpcRoute; with force=True, delete by derived name
        even when no route is tracked. No-op when nothing was created."""
        if force:
            name = self.make_resource_name(self.GRPC_ROUTE_NAME)
        elif self.grpc_route:
            name = self.grpc_route.name
        else:
            return
        logger.info("Deleting GrpcRoute %s", name)
        self.netsvc.delete_grpc_route(name)
        self.grpc_route = None

    def cleanup(self, *, force=False):
        """Delete the route and mesh, then run the base class cleanup."""
        self.delete_grpc_route(force=force)
        self.delete_mesh(force=force)
        super().cleanup(force=force)
class TrafficDirectorSecureManager(TrafficDirectorManager):
    """Traffic Director manager with PSM security (TLS/mTLS) resources.

    Extends the base manager with server/client TLS policies,
    authorization policies, and endpoint policies, created via the GCP
    Network Security and Network Services v1beta1 APIs.
    """

    SERVER_TLS_POLICY_NAME = "server-tls-policy"
    CLIENT_TLS_POLICY_NAME = "client-tls-policy"
    AUTHZ_POLICY_NAME = "authz-policy"
    ENDPOINT_POLICY = "endpoint-policy"
    CERTIFICATE_PROVIDER_INSTANCE = "google_cloud_private_spiffe"
    netsec: _NetworkSecurityV1Beta1
    netsvc: _NetworkServicesV1Beta1

    def __init__(
        self,
        gcp_api_manager: gcp.api.GcpApiManager,
        project: str,
        *,
        resource_prefix: str,
        resource_suffix: Optional[str] = None,
        network: str = "default",
        compute_api_version: str = "v1",
    ):
        super().__init__(
            gcp_api_manager,
            project,
            resource_prefix=resource_prefix,
            resource_suffix=resource_suffix,
            network=network,
            compute_api_version=compute_api_version,
        )
        # API
        self.netsec = _NetworkSecurityV1Beta1(gcp_api_manager, project)
        self.netsvc = _NetworkServicesV1Beta1(gcp_api_manager, project)
        # Managed resources
        self.server_tls_policy: Optional[ServerTlsPolicy] = None
        self.client_tls_policy: Optional[ClientTlsPolicy] = None
        self.authz_policy: Optional[AuthorizationPolicy] = None
        self.endpoint_policy: Optional[EndpointPolicy] = None

    def setup_server_security(
        self, *, server_namespace, server_name, server_port, tls=True, mtls=True
    ):
        """Create the server TLS policy and attach it to the server's
        workloads via an endpoint policy."""
        self.create_server_tls_policy(tls=tls, mtls=mtls)
        self.create_endpoint_policy(
            server_namespace=server_namespace,
            server_name=server_name,
            server_port=server_port,
        )

    def setup_client_security(
        self, *, server_namespace, server_name, tls=True, mtls=True
    ):
        """Create the client TLS policy and attach it to the backend
        service's security settings."""
        self.create_client_tls_policy(tls=tls, mtls=mtls)
        self.backend_service_apply_client_mtls_policy(
            server_namespace, server_name
        )

    def cleanup(self, *, force=False):
        """Delete all managed resources, security resources last."""
        # Cleanup in the reverse order of creation
        super().cleanup(force=force)
        self.delete_endpoint_policy(force=force)
        self.delete_server_tls_policy(force=force)
        self.delete_client_tls_policy(force=force)
        self.delete_authz_policy(force=force)

    def create_server_tls_policy(self, *, tls, mtls):
        """Create the server TLS policy; skipped when both tls and mtls
        are False (nothing to configure)."""
        name = self.make_resource_name(self.SERVER_TLS_POLICY_NAME)
        logger.info("Creating Server TLS Policy %s", name)
        if not tls and not mtls:
            logger.warning(
                (
                    "Server TLS Policy %s neither TLS, nor mTLS "
                    "policy. Skipping creation"
                ),
                name,
            )
            return
        certificate_provider = self._get_certificate_provider()
        policy = {}
        if tls:
            policy["serverCertificate"] = certificate_provider
        if mtls:
            policy["mtlsPolicy"] = {
                "clientValidationCa": [certificate_provider],
            }
        self.netsec.create_server_tls_policy(name, policy)
        self.server_tls_policy = self.netsec.get_server_tls_policy(name)
        logger.debug("Server TLS Policy loaded: %r", self.server_tls_policy)

    def delete_server_tls_policy(self, force=False):
        """Delete the server TLS policy, if one is tracked (or force)."""
        if force:
            name = self.make_resource_name(self.SERVER_TLS_POLICY_NAME)
        elif self.server_tls_policy:
            name = self.server_tls_policy.name
        else:
            return
        logger.info("Deleting Server TLS Policy %s", name)
        self.netsec.delete_server_tls_policy(name)
        self.server_tls_policy = None

    def create_authz_policy(self, *, action: str, rules: list):
        """Create an authorization policy with the given action and rules."""
        name = self.make_resource_name(self.AUTHZ_POLICY_NAME)
        logger.info("Creating Authz Policy %s", name)
        policy = {
            "action": action,
            "rules": rules,
        }
        self.netsec.create_authz_policy(name, policy)
        self.authz_policy = self.netsec.get_authz_policy(name)
        logger.debug("Authz Policy loaded: %r", self.authz_policy)

    def delete_authz_policy(self, force=False):
        """Delete the authorization policy, if one is tracked (or force)."""
        if force:
            name = self.make_resource_name(self.AUTHZ_POLICY_NAME)
        elif self.authz_policy:
            name = self.authz_policy.name
        else:
            return
        logger.info("Deleting Authz Policy %s", name)
        self.netsec.delete_authz_policy(name)
        self.authz_policy = None

    def create_endpoint_policy(
        self, *, server_namespace: str, server_name: str, server_port: int
    ) -> None:
        """Create a GRPC_SERVER endpoint policy matching the server pods.

        Matches workloads by the "app" metadata label and traffic port,
        attaching the previously created server TLS policy and (when
        present) the authorization policy.
        """
        name = self.make_resource_name(self.ENDPOINT_POLICY)
        logger.info("Creating Endpoint Policy %s", name)
        endpoint_matcher_labels = [
            {
                "labelName": "app",
                "labelValue": f"{server_namespace}-{server_name}",
            }
        ]
        port_selector = {"ports": [str(server_port)]}
        label_matcher_all = {
            "metadataLabelMatchCriteria": "MATCH_ALL",
            "metadataLabels": endpoint_matcher_labels,
        }
        config = {
            "type": "GRPC_SERVER",
            "trafficPortSelector": port_selector,
            "endpointMatcher": {
                "metadataLabelMatcher": label_matcher_all,
            },
        }
        if self.server_tls_policy:
            config["serverTlsPolicy"] = self.server_tls_policy.name
        else:
            logger.warning(
                (
                    "Creating Endpoint Policy %s with "
                    "no Server TLS policy attached"
                ),
                name,
            )
        if self.authz_policy:
            config["authorizationPolicy"] = self.authz_policy.name
        self.netsvc.create_endpoint_policy(name, config)
        self.endpoint_policy = self.netsvc.get_endpoint_policy(name)
        logger.debug("Loaded Endpoint Policy: %r", self.endpoint_policy)

    def delete_endpoint_policy(self, force: bool = False) -> None:
        """Delete the endpoint policy, if one is tracked (or force)."""
        if force:
            name = self.make_resource_name(self.ENDPOINT_POLICY)
        elif self.endpoint_policy:
            name = self.endpoint_policy.name
        else:
            return
        logger.info("Deleting Endpoint Policy %s", name)
        self.netsvc.delete_endpoint_policy(name)
        self.endpoint_policy = None

    def create_client_tls_policy(self, *, tls, mtls):
        """Create the client TLS policy; skipped when both tls and mtls
        are False (nothing to configure)."""
        name = self.make_resource_name(self.CLIENT_TLS_POLICY_NAME)
        logger.info("Creating Client TLS Policy %s", name)
        if not tls and not mtls:
            logger.warning(
                (
                    "Client TLS Policy %s neither TLS, nor mTLS "
                    "policy. Skipping creation"
                ),
                name,
            )
            return
        certificate_provider = self._get_certificate_provider()
        policy = {}
        if tls:
            policy["serverValidationCa"] = [certificate_provider]
        if mtls:
            policy["clientCertificate"] = certificate_provider
        self.netsec.create_client_tls_policy(name, policy)
        self.client_tls_policy = self.netsec.get_client_tls_policy(name)
        logger.debug("Client TLS Policy loaded: %r", self.client_tls_policy)

    def delete_client_tls_policy(self, force=False):
        """Delete the client TLS policy, if one is tracked (or force)."""
        if force:
            name = self.make_resource_name(self.CLIENT_TLS_POLICY_NAME)
        elif self.client_tls_policy:
            name = self.client_tls_policy.name
        else:
            return
        logger.info("Deleting Client TLS Policy %s", name)
        self.netsec.delete_client_tls_policy(name)
        self.client_tls_policy = None

    def backend_service_apply_client_mtls_policy(
        self,
        server_namespace,
        server_name,
    ):
        """Patch the backend service's security settings with the client
        TLS policy and the server's expected SPIFFE identity.

        No-op (with a warning) when no client TLS policy was created.
        """
        if not self.client_tls_policy:
            logger.warning(
                (
                    "Client TLS policy not created, "
                    "skipping attaching to Backend Service %s"
                ),
                self.backend_service.name,
            )
            return
        server_spiffe = (
            f"spiffe://{self.project}.svc.id.goog/"
            f"ns/{server_namespace}/sa/{server_name}"
        )
        # Consistency fix: use the module-level `logger` like the rest of
        # this class (was the root `logging` logger).
        logger.info(
            "Adding Client TLS Policy to Backend Service %s: %s, server %s",
            self.backend_service.name,
            self.client_tls_policy.url,
            server_spiffe,
        )
        self.compute.patch_backend_service(
            self.backend_service,
            {
                "securitySettings": {
                    "clientTlsPolicy": self.client_tls_policy.url,
                    "subjectAltNames": [server_spiffe],
                }
            },
        )

    @classmethod
    def _get_certificate_provider(cls):
        """Return the certificateProviderInstance config used by both the
        server and client TLS policies."""
        return {
            "certificateProviderInstance": {
                "pluginInstance": cls.CERTIFICATE_PROVIDER_INSTANCE,
            },
        }
| 40,139
| 35.893382
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/infrastructure/k8s.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(sergiitk): to k8s/ package, and get rid of k8s_internal, which is only
# added to get around circular dependencies caused by k8s.py clashing with
# k8s/__init__.py
import datetime
import json
import logging
import pathlib
import threading
from typing import Any, Callable, List, Optional, Tuple
from kubernetes import client
from kubernetes import utils
import kubernetes.config
import urllib3.exceptions
import yaml
from framework.helpers import retryers
import framework.helpers.highlighter
from framework.infrastructure.k8s_internal import k8s_log_collector
from framework.infrastructure.k8s_internal import k8s_port_forwarder
logger = logging.getLogger(__name__)
# Type aliases
_HighlighterYaml = framework.helpers.highlighter.HighlighterYaml
PodLogCollector = k8s_log_collector.PodLogCollector
PortForwarder = k8s_port_forwarder.PortForwarder
ApiClient = client.ApiClient
V1Deployment = client.V1Deployment
V1ServiceAccount = client.V1ServiceAccount
V1Pod = client.V1Pod
V1PodList = client.V1PodList
V1Service = client.V1Service
V1Namespace = client.V1Namespace
_timedelta = datetime.timedelta
_ApiException = client.ApiException
_FailToCreateError = utils.FailToCreateError
_RETRY_ON_EXCEPTIONS = (
urllib3.exceptions.HTTPError,
_ApiException,
_FailToCreateError,
)
def _server_restart_retryer() -> retryers.Retrying:
    """Build a retryer sized for waiting out a k8s API server restart."""
    retry_policy = retryers.exponential_retryer_with_timeout(
        timeout=_timedelta(minutes=3),
        wait_min=_timedelta(seconds=1),
        wait_max=_timedelta(seconds=10),
        retry_on_exceptions=_RETRY_ON_EXCEPTIONS,
    )
    return retry_policy
def _too_many_requests_retryer() -> retryers.Retrying:
    """Build a retryer with longer backoff for rate-limited (429-style)
    API responses."""
    retry_policy = retryers.exponential_retryer_with_timeout(
        timeout=_timedelta(minutes=3),
        wait_min=_timedelta(seconds=10),
        wait_max=_timedelta(seconds=30),
        retry_on_exceptions=_RETRY_ON_EXCEPTIONS,
    )
    return retry_policy
def _quick_recovery_retryer() -> retryers.Retrying:
    """Build a short constant-wait retryer for errors expected to clear
    almost immediately."""
    fixed_wait = _timedelta(seconds=1)
    return retryers.constant_retryer(
        retry_on_exceptions=_RETRY_ON_EXCEPTIONS,
        wait_fixed=fixed_wait,
        attempts=3,
    )
def label_dict_to_selector(labels: dict) -> str:
    """Render a label dict as a k8s equality-based label selector string."""
    clauses = [f"{key}=={value}" for key, value in labels.items()]
    return ",".join(clauses)
class NotFound(Exception):
    """Indicates the resource is not found on the API server.

    Raised when the k8s API returns 404 (see _handle_api_exception), so
    callers can distinguish "missing" from other API errors.
    """
class KubernetesApiManager:
    """Owns a kubernetes ApiClient for a given kubectl context, plus the
    API facades (apps, core) built on top of it, and supports reloading
    the client (e.g. to refresh expired auth).
    """

    _client: ApiClient
    context: str
    apps: client.AppsV1Api
    core: client.CoreV1Api
    _apis: set

    def __init__(self, context: str):
        self.context = context
        self._client = self._new_client_from_context(context)
        self.apps = client.AppsV1Api(self.client)
        self.core = client.CoreV1Api(self.client)
        # Track all facades so reload() can re-point them at a new client.
        self._apis = {self.apps, self.core}

    @property
    def client(self) -> ApiClient:
        return self._client

    def close(self):
        self.client.close()

    def reload(self):
        """Recreate the ApiClient and re-point all cached API facades."""
        self.close()
        self._client = self._new_client_from_context(self.context)
        # Update default configuration so that modules that initialize
        # ApiClient implicitly (e.g. kubernetes.watch.Watch) get the updates.
        client.Configuration.set_default(self._client.configuration)
        for api in self._apis:
            api.api_client = self._client

    @staticmethod
    def _new_client_from_context(context: str) -> ApiClient:
        """Build an ApiClient from kubeconfig for the given context."""
        client_instance = kubernetes.config.new_client_from_config(
            context=context
        )
        logger.info(
            'Using kubernetes context "%s", active host: %s',
            context,
            client_instance.configuration.host,
        )
        # TODO(sergiitk): fine-tune if we see the total wait unreasonably long.
        client_instance.configuration.retries = 10
        return client_instance
class KubernetesNamespace: # pylint: disable=too-many-public-methods
    """Operations on k8s resources scoped to a single namespace.

    Wraps the kubernetes client API with transparent retries for
    transient API server failures (restarts, rate limiting, conflicts),
    plus wait/polling helpers for resource lifecycle transitions.
    """

    _highlighter: framework.helpers.highlighter.Highlighter
    _api: KubernetesApiManager
    _name: str

    # Annotation where GKE reports the Network Endpoint Group status.
    NEG_STATUS_META = "cloud.google.com/neg-status"
    DELETE_GRACE_PERIOD_SEC: int = 5
    WAIT_SHORT_TIMEOUT_SEC: int = 60
    WAIT_SHORT_SLEEP_SEC: int = 1
    WAIT_MEDIUM_TIMEOUT_SEC: int = 5 * 60
    WAIT_MEDIUM_SLEEP_SEC: int = 10
    WAIT_LONG_TIMEOUT_SEC: int = 10 * 60
    WAIT_LONG_SLEEP_SEC: int = 30
    WAIT_POD_START_TIMEOUT_SEC: int = 3 * 60

    def __init__(self, api: KubernetesApiManager, name: str):
        self._api = api
        self._name = name
        self._highlighter = _HighlighterYaml()

    @property
    def name(self):
        return self._name

    def _refresh_auth(self):
        """Reload the api client, e.g. when the auth token has expired."""
        logger.info("Reloading k8s api client to refresh the auth.")
        self._api.reload()

    def _apply_manifest(self, manifest):
        """Create resources in this namespace from a parsed manifest dict."""
        return utils.create_from_dict(
            self._api.client, manifest, namespace=self.name
        )

    def _get_resource(self, method: Callable[[Any], object], *args, **kwargs):
        """Call a read method with retries; map NotFound to None."""
        try:
            return self._execute(method, *args, **kwargs)
        except NotFound:
            # Instead of trowing an error when a resource doesn't exist,
            # just return None.
            return None

    def _execute(self, method: Callable[[Any], object], *args, **kwargs):
        """Invoke a k8s API method, retrying recognized transient errors."""
        # Note: Intentionally leaving return type as unspecified to not confuse
        # pytype for methods that delegate calls to this wrapper.
        try:
            return method(*args, **kwargs)
        except _RETRY_ON_EXCEPTIONS as err:
            retryer = self._handle_exception(err)
            if retryer is not None:
                return retryer(method, *args, **kwargs)
            raise

    def _handle_exception(self, err: Exception) -> Optional[retryers.Retrying]:
        """Map a k8s/urllib3 exception to a retryer, or None to re-raise."""
        # TODO(sergiitk): replace returns with match/case when we use to py3.10.
        # pylint: disable=too-many-return-statements
        # Unwrap MaxRetryError.
        if isinstance(err, urllib3.exceptions.MaxRetryError):
            return self._handle_exception(err.reason) if err.reason else None
        # We consider all `NewConnectionError`s as caused by a k8s
        # API server restart. `NewConnectionError`s we've seen:
        # - [Errno 110] Connection timed out
        # - [Errno 111] Connection refused
        if isinstance(err, urllib3.exceptions.NewConnectionError):
            return _server_restart_retryer()
        # We consider all `ProtocolError`s with "Connection aborted" message
        # as caused by a k8s API server restart.
        # `ProtocolError`s we've seen:
        # - RemoteDisconnected('Remote end closed connection
        #   without response')
        # - ConnectionResetError(104, 'Connection reset by peer')
        if isinstance(err, urllib3.exceptions.ProtocolError):
            if "connection aborted" in str(err).lower():
                return _server_restart_retryer()
            else:
                # To cover other cases we didn't account for, and haven't
                # seen in the wild, f.e. "Connection broken"
                return _quick_recovery_retryer()
        # ApiException means the server has received our request and responded
        # with an error we can parse (except a few corner cases, f.e. SSLError).
        if isinstance(err, _ApiException):
            return self._handle_api_exception(err)
        # Unwrap FailToCreateError.
        if isinstance(err, _FailToCreateError):
            # We're always sending a single document, so we expect
            # a single wrapped exception in return.
            if len(err.api_exceptions) == 1:
                return self._handle_exception(err.api_exceptions[0])
        return None

    def _handle_api_exception(
        self, err: _ApiException
    ) -> Optional[retryers.Retrying]:
        """Map an ApiException's HTTP status to a retryer or NotFound."""
        # TODO(sergiitk): replace returns with match/case when we use to py3.10.
        # pylint: disable=too-many-return-statements
        # TODO(sergiitk): can I chain the retryers?
        logger.debug(
            "Handling k8s.ApiException: status=%s reason=%s body=%s headers=%s",
            err.status,
            err.reason,
            err.body,
            err.headers,
        )
        code: int = err.status
        body = err.body.lower() if err.body else ""
        # 401 Unauthorized: token might be expired, attempt auth refresh.
        if code == 401:
            self._refresh_auth()
            return _quick_recovery_retryer()
        # 404 Not Found. Make it easier for the caller to handle 404s.
        if code == 404:
            raise NotFound(
                "Kubernetes API returned 404 Not Found: "
                f"{self._status_message_or_body(body)}"
            ) from err
        # 409 Conflict
        # "Operation cannot be fulfilled on resourcequotas "foo": the object
        # has been modified; please apply your changes to the latest version
        # and try again".
        # See https://github.com/kubernetes/kubernetes/issues/67761
        if code == 409:
            return _quick_recovery_retryer()
        # 429 Too Many Requests: "Too many requests, please try again later"
        if code == 429:
            return _too_many_requests_retryer()
        # 500 Internal Server Error
        if code == 500:
            # Observed when using `kubectl proxy`.
            # "dial tcp 127.0.0.1:8080: connect: connection refused"
            if "connection refused" in body:
                return _server_restart_retryer()
            # Known 500 errors that should be treated as 429:
            # - Internal Server Error: "/api/v1/namespaces": the server has
            #   received too many requests and has asked us
            #   to try again later
            # - Internal Server Error: "/api/v1/namespaces/foo/services":
            #   the server is currently unable to handle the request
            if (
                "too many requests" in body
                or "currently unable to handle the request" in body
            ):
                return _too_many_requests_retryer()
            # In other cases, just retry a few times in case the server
            # resumes normal operation.
            return _quick_recovery_retryer()
        # 504 Gateway Timeout:
        # "Timeout: request did not complete within the allotted timeout"
        if code == 504:
            return _server_restart_retryer()
        return None

    @classmethod
    def _status_message_or_body(cls, body: str) -> str:
        """Extract the "message" field from a JSON error body, if any."""
        try:
            return str(json.loads(body)["message"])
        except (KeyError, ValueError):
            return body

    def create_single_resource(self, manifest):
        """Create one resource from a manifest dict, with retries."""
        return self._execute(self._apply_manifest, manifest)

    def get_service(self, name) -> V1Service:
        return self._get_resource(
            self._api.core.read_namespaced_service, name, self.name
        )

    def get_service_account(self, name) -> V1ServiceAccount:
        return self._get_resource(
            self._api.core.read_namespaced_service_account, name, self.name
        )

    def delete_service(
        self, name, grace_period_seconds=DELETE_GRACE_PERIOD_SEC
    ):
        self._execute(
            self._api.core.delete_namespaced_service,
            name=name,
            namespace=self.name,
            body=client.V1DeleteOptions(
                propagation_policy="Foreground",
                grace_period_seconds=grace_period_seconds,
            ),
        )

    def delete_service_account(
        self, name, grace_period_seconds=DELETE_GRACE_PERIOD_SEC
    ):
        self._execute(
            self._api.core.delete_namespaced_service_account,
            name=name,
            namespace=self.name,
            body=client.V1DeleteOptions(
                propagation_policy="Foreground",
                grace_period_seconds=grace_period_seconds,
            ),
        )

    def get(self) -> V1Namespace:
        """Read this namespace object itself; None when it doesn't exist."""
        return self._get_resource(self._api.core.read_namespace, self.name)

    def delete(self, grace_period_seconds=DELETE_GRACE_PERIOD_SEC):
        """Delete this namespace (foreground propagation)."""
        self._execute(
            self._api.core.delete_namespace,
            name=self.name,
            body=client.V1DeleteOptions(
                propagation_policy="Foreground",
                grace_period_seconds=grace_period_seconds,
            ),
        )

    def wait_for_service_deleted(
        self,
        name: str,
        timeout_sec: int = WAIT_SHORT_TIMEOUT_SEC,
        wait_sec: int = WAIT_SHORT_SLEEP_SEC,
    ) -> None:
        """Poll until the service no longer exists."""
        retryer = retryers.constant_retryer(
            wait_fixed=_timedelta(seconds=wait_sec),
            timeout=_timedelta(seconds=timeout_sec),
            check_result=lambda service: service is None,
        )
        retryer(self.get_service, name)

    def wait_for_service_account_deleted(
        self,
        name: str,
        timeout_sec: int = WAIT_SHORT_TIMEOUT_SEC,
        wait_sec: int = WAIT_SHORT_SLEEP_SEC,
    ) -> None:
        """Poll until the service account no longer exists."""
        retryer = retryers.constant_retryer(
            wait_fixed=_timedelta(seconds=wait_sec),
            timeout=_timedelta(seconds=timeout_sec),
            check_result=lambda service_account: service_account is None,
        )
        retryer(self.get_service_account, name)

    def wait_for_namespace_deleted(
        self,
        timeout_sec: int = WAIT_LONG_TIMEOUT_SEC,
        wait_sec: int = WAIT_LONG_SLEEP_SEC,
    ) -> None:
        """Poll until this namespace no longer exists."""
        retryer = retryers.constant_retryer(
            wait_fixed=_timedelta(seconds=wait_sec),
            timeout=_timedelta(seconds=timeout_sec),
            check_result=lambda namespace: namespace is None,
        )
        retryer(self.get)

    def wait_for_service_neg(
        self,
        name: str,
        timeout_sec: int = WAIT_SHORT_TIMEOUT_SEC,
        wait_sec: int = WAIT_SHORT_SLEEP_SEC,
    ) -> None:
        """Poll until the service reports the NEG status annotation."""
        timeout = _timedelta(seconds=timeout_sec)
        retryer = retryers.constant_retryer(
            wait_fixed=_timedelta(seconds=wait_sec),
            timeout=timeout,
            check_result=self._check_service_neg_annotation,
        )
        try:
            retryer(self.get_service, name)
        except retryers.RetryError as e:
            logger.error(
                (
                    "Timeout %s (h:mm:ss) waiting for service %s to report NEG "
                    "status. Last service status:\n%s"
                ),
                timeout,
                name,
                self._pretty_format_status(e.result()),
            )
            raise

    def get_service_neg(
        self, service_name: str, service_port: int
    ) -> Tuple[str, List[str]]:
        """Parse the NEG name and zones from the service's NEG annotation."""
        service = self.get_service(service_name)
        neg_info: dict = json.loads(
            service.metadata.annotations[self.NEG_STATUS_META]
        )
        neg_name: str = neg_info["network_endpoint_groups"][str(service_port)]
        neg_zones: List[str] = neg_info["zones"]
        return neg_name, neg_zones

    def get_deployment(self, name) -> V1Deployment:
        return self._get_resource(
            self._api.apps.read_namespaced_deployment, name, self.name
        )

    def delete_deployment(
        self, name: str, grace_period_seconds: int = DELETE_GRACE_PERIOD_SEC
    ) -> None:
        self._execute(
            self._api.apps.delete_namespaced_deployment,
            name=name,
            namespace=self.name,
            body=client.V1DeleteOptions(
                propagation_policy="Foreground",
                grace_period_seconds=grace_period_seconds,
            ),
        )

    def list_deployment_pods(self, deployment: V1Deployment) -> List[V1Pod]:
        """List pods matched by the deployment's label selector."""
        # V1LabelSelector.match_expressions not supported at the moment
        return self.list_pods_with_labels(deployment.spec.selector.match_labels)

    def wait_for_deployment_available_replicas(
        self,
        name: str,
        count: int = 1,
        timeout_sec: int = WAIT_MEDIUM_TIMEOUT_SEC,
        wait_sec: int = WAIT_SHORT_SLEEP_SEC,
    ) -> None:
        """Poll until the deployment reports at least `count` available
        replicas."""
        timeout = _timedelta(seconds=timeout_sec)
        retryer = retryers.constant_retryer(
            wait_fixed=_timedelta(seconds=wait_sec),
            timeout=timeout,
            check_result=lambda depl: self._replicas_available(depl, count),
        )
        try:
            retryer(self.get_deployment, name)
        except retryers.RetryError as e:
            logger.error(
                (
                    "Timeout %s (h:mm:ss) waiting for deployment %s to report"
                    " %i replicas available. Last status:\n%s"
                ),
                timeout,
                name,
                count,
                self._pretty_format_status(e.result()),
            )
            raise

    def wait_for_deployment_replica_count(
        self,
        deployment: V1Deployment,
        count: int = 1,
        *,
        timeout_sec: int = WAIT_MEDIUM_TIMEOUT_SEC,
        wait_sec: int = WAIT_SHORT_SLEEP_SEC,
    ) -> None:
        """Poll until exactly `count` pods match the deployment selector."""
        timeout = _timedelta(seconds=timeout_sec)
        retryer = retryers.constant_retryer(
            wait_fixed=_timedelta(seconds=wait_sec),
            timeout=timeout,
            check_result=lambda pods: len(pods) == count,
        )
        try:
            retryer(self.list_deployment_pods, deployment)
        except retryers.RetryError as e:
            result = e.result(default=[])
            logger.error(
                (
                    "Timeout %s (h:mm:ss) waiting for pod count %i, got: %i. "
                    "Pod statuses:\n%s"
                ),
                timeout,
                count,
                len(result),
                self._pretty_format_statuses(result),
            )
            raise

    def wait_for_deployment_deleted(
        self,
        deployment_name: str,
        timeout_sec: int = WAIT_MEDIUM_TIMEOUT_SEC,
        wait_sec: int = WAIT_MEDIUM_SLEEP_SEC,
    ) -> None:
        """Poll until the deployment no longer exists."""
        retryer = retryers.constant_retryer(
            wait_fixed=_timedelta(seconds=wait_sec),
            timeout=_timedelta(seconds=timeout_sec),
            check_result=lambda deployment: deployment is None,
        )
        retryer(self.get_deployment, deployment_name)

    def list_pods_with_labels(self, labels: dict) -> List[V1Pod]:
        """List pods matching all labels in the given dict."""
        pod_list: V1PodList = self._execute(
            self._api.core.list_namespaced_pod,
            self.name,
            label_selector=label_dict_to_selector(labels),
        )
        return pod_list.items

    def get_pod(self, name: str) -> V1Pod:
        return self._get_resource(
            self._api.core.read_namespaced_pod, name, self.name
        )

    def wait_for_pod_started(
        self,
        pod_name: str,
        timeout_sec: int = WAIT_POD_START_TIMEOUT_SEC,
        wait_sec: int = WAIT_SHORT_SLEEP_SEC,
    ) -> None:
        """Poll until the pod leaves the Pending/Unknown phases."""
        timeout = _timedelta(seconds=timeout_sec)
        retryer = retryers.constant_retryer(
            wait_fixed=_timedelta(seconds=wait_sec),
            timeout=timeout,
            check_result=self._pod_started,
        )
        try:
            retryer(self.get_pod, pod_name)
        except retryers.RetryError as e:
            logger.error(
                (
                    "Timeout %s (h:mm:ss) waiting for pod %s to start. "
                    "Pod status:\n%s"
                ),
                timeout,
                pod_name,
                self._pretty_format_status(e.result()),
            )
            raise

    def port_forward_pod(
        self,
        pod: V1Pod,
        remote_port: int,
        local_port: Optional[int] = None,
        local_address: Optional[str] = None,
    ) -> k8s_port_forwarder.PortForwarder:
        """Start a `kubectl port-forward` to the pod; returns the
        connected forwarder."""
        pf = k8s_port_forwarder.PortForwarder(
            self._api.context,
            self.name,
            f"pod/{pod.metadata.name}",
            remote_port,
            local_port,
            local_address,
        )
        pf.connect()
        return pf

    def pod_start_logging(
        self,
        *,
        pod_name: str,
        log_path: pathlib.Path,
        log_stop_event: threading.Event,
        log_to_stdout: bool = False,
        log_timestamps: bool = False,
    ) -> PodLogCollector:
        """Start a background collector streaming the pod's log to a file."""
        pod_log_collector = PodLogCollector(
            pod_name=pod_name,
            namespace_name=self.name,
            read_pod_log_fn=self._api.core.read_namespaced_pod_log,
            stop_event=log_stop_event,
            log_path=log_path,
            log_to_stdout=log_to_stdout,
            log_timestamps=log_timestamps,
        )
        pod_log_collector.start()
        return pod_log_collector

    def _pretty_format_statuses(
        self, k8s_objects: List[Optional[object]]
    ) -> str:
        return "\n".join(
            self._pretty_format_status(k8s_object) for k8s_object in k8s_objects
        )

    def _pretty_format_status(self, k8s_object: Optional[object]) -> str:
        """Best-effort human-readable name + status of a k8s object."""
        if k8s_object is None:
            return "No data"
        # Parse the name if present.
        if hasattr(k8s_object, "metadata") and hasattr(
            k8s_object.metadata, "name"
        ):
            name = k8s_object.metadata.name
        else:
            name = "Can't parse resource name"
        # Pretty-print the status if present.
        if hasattr(k8s_object, "status"):
            try:
                status = self._pretty_format(k8s_object.status.to_dict())
            except Exception as e: # pylint: disable=broad-except
                # Catching all exceptions because not printing the status
                # isn't as important as the system under test.
                status = f"Can't parse resource status: {e}"
        else:
            status = "Can't parse resource status"
        # Return the name of k8s object, and its pretty-printed status.
        return f"{name}:\n{status}\n"

    def _pretty_format(self, data: dict) -> str:
        """Return a string with pretty-printed yaml data from a python dict."""
        yaml_out: str = yaml.dump(data, explicit_start=True, explicit_end=True)
        return self._highlighter.highlight(yaml_out)

    @classmethod
    def _check_service_neg_annotation(
        cls, service: Optional[V1Service]
    ) -> bool:
        """True when the service carries the NEG status annotation."""
        return (
            isinstance(service, V1Service)
            and cls.NEG_STATUS_META in service.metadata.annotations
        )

    @classmethod
    def _pod_started(cls, pod: V1Pod) -> bool:
        """True when the pod has progressed past Pending/Unknown."""
        return isinstance(pod, V1Pod) and pod.status.phase not in (
            "Pending",
            "Unknown",
        )

    @classmethod
    def _replicas_available(cls, deployment: V1Deployment, count: int) -> bool:
        """True when the deployment has at least `count` available replicas."""
        return (
            isinstance(deployment, V1Deployment)
            and deployment.status.available_replicas is not None
            and deployment.status.available_replicas >= count
        )
| 23,271
| 33.630952
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/infrastructure/__init__.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 577
| 40.285714
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/infrastructure/k8s_internal/__init__.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 577
| 40.285714
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/infrastructure/k8s_internal/k8s_port_forwarder.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import subprocess
import time
from typing import Optional
logger = logging.getLogger(__name__)
class PortForwardingError(Exception):
    """Raised when the kubectl port-forward tunnel cannot be established."""
class PortForwarder:
    """Manages a `kubectl port-forward` subprocess to a Kubernetes resource.

    Starts kubectl as a child process, parses its stdout to confirm the
    tunnel is established (and to learn the dynamically-assigned local port
    when none was requested), and kills the child on close().
    """

    # Default local bind address when the caller does not supply one.
    PORT_FORWARD_LOCAL_ADDRESS: str = "127.0.0.1"

    def __init__(
        self,
        context: str,
        namespace: str,
        destination: str,
        remote_port: int,
        local_port: Optional[int] = None,
        local_address: Optional[str] = None,
    ):
        """
        Args:
            context: kubectl context name.
            namespace: K8s namespace containing the destination resource.
            destination: Forward target as accepted by kubectl,
                e.g. "pod/NAME".
            remote_port: Port on the destination to forward to.
            local_port: Local port to bind; if None, kubectl picks a free
                port and connect() records it in self.local_port.
            local_address: Local address to bind; defaults to 127.0.0.1.
        """
        self.context = context
        self.namespace = namespace
        self.destination = destination
        self.remote_port = remote_port
        self.local_address = local_address or self.PORT_FORWARD_LOCAL_ADDRESS
        self.local_port: Optional[int] = local_port
        # Populated by connect(); None while no tunnel is active.
        self.subprocess: Optional[subprocess.Popen] = None

    def connect(self) -> None:
        """Start kubectl and block until forwarding is confirmed.

        Raises:
            PortForwardingError: kubectl exited early or printed
                unexpected output.
        """
        if self.local_port:
            port_mapping = f"{self.local_port}:{self.remote_port}"
        else:
            # An empty local part tells kubectl to choose a free local port.
            port_mapping = f":{self.remote_port}"
        cmd = [
            "kubectl",
            "--context",
            self.context,
            "--namespace",
            self.namespace,
            "port-forward",
            "--address",
            self.local_address,
            self.destination,
            port_mapping,
        ]
        logger.debug(
            "Executing port forwarding subprocess cmd: %s", " ".join(cmd)
        )
        self.subprocess = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            universal_newlines=True,
        )
        # Wait for stdout line indicating successful start.
        if self.local_port:
            local_port_expected = (
                f"Forwarding from {self.local_address}:{self.local_port}"
                f" -> {self.remote_port}"
            )
        else:
            local_port_re = re.compile(
                f"Forwarding from {self.local_address}:([0-9]+) ->"
                f" {self.remote_port}"
            )
        try:
            while True:
                time.sleep(0.05)
                output = self.subprocess.stdout.readline().strip()
                if not output:
                    return_code = self.subprocess.poll()
                    if return_code is not None:
                        # kubectl exited: drain remaining output for the
                        # error message. (Was a redundant list comprehension
                        # over readlines(); readlines() already returns a
                        # list.)
                        errors = self.subprocess.stdout.readlines()
                        raise PortForwardingError(
                            "Error forwarding port, kubectl return "
                            f"code {return_code}, output {errors}"
                        )
                    # If there is no output, and the subprocess is not exiting,
                    # continue waiting for the log line.
                    continue
                # Validate output log
                if self.local_port:
                    if output != local_port_expected:
                        raise PortForwardingError(
                            f"Error forwarding port, unexpected output {output}"
                        )
                else:
                    groups = local_port_re.search(output)
                    if groups is None:
                        raise PortForwardingError(
                            f"Error forwarding port, unexpected output {output}"
                        )
                    # Update local port to the randomly picked one
                    self.local_port = int(groups[1])
                logger.info(output)
                break
        except Exception:
            # Don't leak the child process when startup validation fails.
            self.close()
            raise

    def close(self) -> None:
        """Kill the kubectl subprocess, if any, and drain its output."""
        if self.subprocess is not None:
            logger.info(
                "Shutting down port forwarding, pid %s", self.subprocess.pid
            )
            self.subprocess.kill()
            stdout, _ = self.subprocess.communicate(timeout=5)
            logger.info("Port forwarding stopped")
            logger.debug("Port forwarding remaining stdout: %s", stdout)
            self.subprocess = None
| 4,683
| 33.955224
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/infrastructure/k8s_internal/k8s_log_collector.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pathlib
import threading
from typing import Any, Callable, Optional, TextIO
from kubernetes import client
from kubernetes.watch import watch
logger = logging.getLogger(__name__)
class PodLogCollector(threading.Thread):
    """A thread that streams logs from the remote pod to a local file."""
    # Pod and namespace identifying the log source.
    pod_name: str
    namespace_name: str
    # Set by the owner to request shutdown of the collection loop.
    stop_event: threading.Event
    # Set by this thread once the output stream has been closed.
    drain_event: threading.Event
    # Local file the log lines are appended to.
    log_path: pathlib.Path
    # When True, each log line is also emitted via the module logger.
    log_to_stdout: bool
    # When True, the K8s API prepends timestamps to each line.
    log_timestamps: bool
    # Seconds to wait before re-opening the stream after an API error.
    error_backoff_sec: int
    _out_stream: Optional[TextIO]
    _watcher: Optional[watch.Watch]
    # K8s API callable used to read the pod log (e.g. read_namespaced_pod_log).
    _read_pod_log_fn: Callable[..., Any]
    def __init__(
        self,
        *,
        pod_name: str,
        namespace_name: str,
        read_pod_log_fn: Callable[..., Any],
        stop_event: threading.Event,
        log_path: pathlib.Path,
        log_to_stdout: bool = False,
        log_timestamps: bool = False,
        error_backoff_sec: int = 5,
    ):
        self.pod_name = pod_name
        self.namespace_name = namespace_name
        self.stop_event = stop_event
        # Used to indicate log draining happened. Turned out to be not as useful
        # in cases when the logging happens rarely because the blocking happens
        # in the native code, which doesn't yield until the next log message.
        self.drain_event = threading.Event()
        self.log_path = log_path
        self.log_to_stdout = log_to_stdout
        self.log_timestamps = log_timestamps
        self.error_backoff_sec = error_backoff_sec
        self._read_pod_log_fn = read_pod_log_fn
        self._out_stream = None
        self._watcher = None
        # Daemon thread: does not block interpreter exit if left running.
        super().__init__(name=f"pod-log-{pod_name}", daemon=True)
    def run(self):
        """Thread body: stream logs until stop_event is set, then clean up."""
        logger.info(
            "Starting log collection thread %i for %s",
            self.ident,
            self.pod_name,
        )
        try:
            # errors="ignore": drop undecodable bytes rather than crash
            # the collection thread.
            self._out_stream = open(
                self.log_path, "w", errors="ignore", encoding="utf-8"
            )
            while not self.stop_event.is_set():
                self._stream_log()
        finally:
            self._stop()
    def flush(self):
        """Flushes the log file buffer. May be called from the main thread."""
        if self._out_stream:
            self._out_stream.flush()
            os.fsync(self._out_stream.fileno())
    def _stop(self):
        # Tear down the watcher and the output stream; idempotent.
        if self._watcher is not None:
            self._watcher.stop()
            self._watcher = None
        if self._out_stream is not None:
            self._write(
                f"Finished log collection for pod {self.pod_name}",
                force_flush=True,
            )
            self._out_stream.close()
            self._out_stream = None
        # Signal to any waiter that draining is complete.
        self.drain_event.set()
    def _stream_log(self):
        """One streaming attempt; logs API errors and backs off before retry."""
        try:
            self._restart_stream()
        except client.ApiException as e:
            self._write(f"Exception fetching logs: {e}")
            self._write(
                (
                    f"Restarting log fetching in {self.error_backoff_sec} sec. "
                    "Will attempt to read from the beginning, but log "
                    "truncation may occur."
                ),
                force_flush=True,
            )
        finally:
            # Instead of time.sleep(), we're waiting on the stop event
            # in case it gets set earlier.
            self.stop_event.wait(timeout=self.error_backoff_sec)
    def _restart_stream(self):
        """(Re)open the K8s watch stream and write lines until stopped."""
        self._watcher = watch.Watch()
        for msg in self._watcher.stream(
            self._read_pod_log_fn,
            name=self.pod_name,
            namespace=self.namespace_name,
            timestamps=self.log_timestamps,
            follow=True,
        ):
            self._write(msg)
            # Every message check if a stop is requested.
            if self.stop_event.is_set():
                self._stop()
                return
    def _write(self, msg: str, force_flush: bool = False):
        # NOTE(review): assumes _out_stream is open; only called from run()
        # after open() and from _stop() under its own None-check — confirm
        # before calling from elsewhere.
        self._out_stream.write(msg)
        self._out_stream.write("\n")
        if force_flush:
            self.flush()
        if self.log_to_stdout:
            logger.info(msg)
| 4,746
| 32.195804
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/infrastructure/gcp/iam.py
|
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import datetime
import functools
import logging
from typing import Any, Dict, FrozenSet, Optional
from framework.helpers import retryers
from framework.infrastructure import gcp
logger = logging.getLogger(__name__)
# Type aliases
_timedelta = datetime.timedelta
_HttpRequest = gcp.api.HttpRequest
class EtagConflict(gcp.api.Error):
    """Raised when concurrent IAM policy modifications collide.

    GCP detects the conflict via the policy's etag:
    https://cloud.google.com/iam/docs/policies#etag
    """
def handle_etag_conflict(func):
    """Decorator: retry `func` on concurrent IAM policy modifications.

    GCP IAM uses etags for optimistic concurrency control; an update based
    on a stale etag fails and must be retried after a fresh read.
    https://cloud.google.com/iam/docs/policies#etag
    """

    # Fix: functools.wraps preserves the wrapped callable's __name__,
    # __doc__, etc., which the original wrapper discarded.
    @functools.wraps(func)
    def wrap_retry_on_etag_conflict(*args, **kwargs):
        # Transport errors are retried too: the read-modify-write cycle
        # is safe to repeat from the top.
        retryer = retryers.exponential_retryer_with_timeout(
            retry_on_exceptions=(EtagConflict, gcp.api.TransportError),
            wait_min=_timedelta(seconds=1),
            wait_max=_timedelta(seconds=10),
            timeout=_timedelta(minutes=2),
        )
        return retryer(func, *args, **kwargs)

    return wrap_retry_on_etag_conflict
def _replace_binding(
policy: "Policy", binding: "Policy.Binding", new_binding: "Policy.Binding"
) -> "Policy":
new_bindings = set(policy.bindings)
new_bindings.discard(binding)
new_bindings.add(new_binding)
# pylint: disable=too-many-function-args # No idea why pylint is like that.
return dataclasses.replace(policy, bindings=frozenset(new_bindings))
@dataclasses.dataclass(frozen=True)
class ServiceAccount:
    """Immutable view of an IAM service account.

    https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts

    The deprecated "etag" field is intentionally not modeled.
    """

    name: str
    projectId: str
    uniqueId: str
    email: str
    oauth2ClientId: str
    displayName: str = ""
    description: str = ""
    disabled: bool = False

    @classmethod
    def from_response(cls, response: Dict[str, Any]) -> "ServiceAccount":
        """Build a ServiceAccount from a GCP API response dictionary."""
        required = ("name", "projectId", "uniqueId", "email", "oauth2ClientId")
        fields: Dict[str, Any] = {key: response[key] for key in required}
        # Optional fields fall back to the dataclass defaults.
        fields["description"] = response.get("description", "")
        fields["displayName"] = response.get("displayName", "")
        fields["disabled"] = response.get("disabled", False)
        return cls(**fields)

    def as_dict(self) -> Dict[str, Any]:
        """Serialize back to a plain dictionary."""
        return dataclasses.asdict(self)
@dataclasses.dataclass(frozen=True)
class Expr:
    """A textual expression in Common Expression Language (CEL) syntax.

    https://cloud.google.com/iam/docs/reference/rest/v1/Expr
    """

    expression: str
    title: str = ""
    description: str = ""
    location: str = ""

    @classmethod
    def from_response(cls, response: Dict[str, Any]) -> "Expr":
        """Construct directly from the API response fields."""
        return cls(**response)

    def as_dict(self) -> Dict[str, Any]:
        """Serialize back to a plain dictionary (field-definition order)."""
        return {
            "expression": self.expression,
            "title": self.title,
            "description": self.description,
            "location": self.location,
        }
@dataclasses.dataclass(frozen=True)
class Policy:
    """An Identity and Access Management (IAM) policy, which specifies
    access controls for Google Cloud resources.

    https://cloud.google.com/iam/docs/reference/rest/v1/Policy
    Note: auditConfigs not supported by this implementation.
    """

    @dataclasses.dataclass(frozen=True)
    class Binding:
        """Policy Binding. Associates members with a role.

        https://cloud.google.com/iam/docs/reference/rest/v1/Policy#binding
        """

        role: str
        members: FrozenSet[str]
        condition: Optional[Expr] = None

        @classmethod
        def from_response(cls, response: Dict[str, Any]) -> "Policy.Binding":
            """Parse a single binding from the API response dictionary."""
            fields = {
                "role": response["role"],
                "members": frozenset(response.get("members", [])),
            }
            if "condition" in response:
                fields["condition"] = Expr.from_response(response["condition"])
            return cls(**fields)

        def as_dict(self) -> Dict[str, Any]:
            """Serialize to the wire format; omits condition when unset."""
            result = {
                "role": self.role,
                "members": list(self.members),
            }
            if self.condition is not None:
                result["condition"] = self.condition.as_dict()
            return result

    bindings: FrozenSet[Binding]
    etag: str
    version: Optional[int] = None

    def find_binding_for_role(
        self, role: str, condition: Optional[Expr] = None
    ) -> Optional["Policy.Binding"]:
        """Return the binding matching role and condition, or None.

        Fix: previously memoized with functools.lru_cache, which on an
        instance method keys the global cache on `self` and keeps every
        Policy object alive for the process lifetime (ruff B019). Binding
        sets are small, so a plain linear scan is leak-free and cheap.
        """
        for binding in self.bindings:
            if binding.role == role and binding.condition == condition:
                return binding
        return None

    @classmethod
    def from_response(cls, response: Dict[str, Any]) -> "Policy":
        """Build a Policy (and its Bindings) from the API response dict."""
        bindings = frozenset(
            cls.Binding.from_response(b) for b in response.get("bindings", [])
        )
        return cls(
            bindings=bindings,
            etag=response["etag"],
            version=response.get("version"),
        )

    def as_dict(self) -> Dict[str, Any]:
        """Serialize to the wire format; omits version when unset."""
        result = {
            "bindings": [binding.as_dict() for binding in self.bindings],
            "etag": self.etag,
        }
        if self.version is not None:
            result["version"] = self.version
        return result
class IamV1(gcp.api.GcpProjectApiResource):
    """
    Identity and Access Management (IAM) API.
    https://cloud.google.com/iam/docs/reference/rest
    """
    # Cached shortcut to the projects/*/serviceAccounts/ endpoints.
    _service_accounts: gcp.api.discovery.Resource
    # Operations that affect conditional role bindings must specify version 3.
    # Otherwise conditions are omitted, and role names returned with a suffix,
    # f.e. roles/iam.workloadIdentityUser_withcond_f1ec33c9beb41857dbf0
    # https://cloud.google.com/iam/docs/reference/rest/v1/Policy#FIELDS.version
    POLICY_VERSION: int = 3
    def __init__(self, api_manager: gcp.api.GcpApiManager, project: str):
        super().__init__(api_manager.iam("v1"), project)
        # Shortcut to projects/*/serviceAccounts/ endpoints
        self._service_accounts = self.api.projects().serviceAccounts()
    def service_account_resource_name(self, account) -> str:
        """
        Returns full resource name of the service account.
        The resource name of the service account in the following format:
        projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}.
        The ACCOUNT value can be the email address or the uniqueId of the
        service account.
        Ref https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts/get
        Args:
            account: The ACCOUNT value
        """
        return f"projects/{self.project}/serviceAccounts/{account}"
    def get_service_account(self, account: str) -> ServiceAccount:
        """Fetch a service account by email or uniqueId."""
        resource_name = self.service_account_resource_name(account)
        request: _HttpRequest = self._service_accounts.get(name=resource_name)
        response: Dict[str, Any] = self._execute(request)
        logger.debug(
            "Loaded Service Account:\n%s", self.resource_pretty_format(response)
        )
        return ServiceAccount.from_response(response)
    def get_service_account_iam_policy(self, account: str) -> Policy:
        """Fetch the IAM policy attached to a service account.

        Requests POLICY_VERSION so conditional bindings are fully returned.
        """
        resource_name = self.service_account_resource_name(account)
        request: _HttpRequest = self._service_accounts.getIamPolicy(
            resource=resource_name,
            options_requestedPolicyVersion=self.POLICY_VERSION,
        )
        response: Dict[str, Any] = self._execute(request)
        logger.debug(
            "Loaded Service Account Policy:\n%s",
            self.resource_pretty_format(response),
        )
        return Policy.from_response(response)
    def set_service_account_iam_policy(
        self, account: str, policy: Policy
    ) -> Policy:
        """Sets the IAM policy that is attached to a service account.
        https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts/setIamPolicy
        """
        resource_name = self.service_account_resource_name(account)
        body = {"policy": policy.as_dict()}
        logger.debug(
            "Updating Service Account %s policy:\n%s",
            account,
            self.resource_pretty_format(body),
        )
        try:
            request: _HttpRequest = self._service_accounts.setIamPolicy(
                resource=resource_name, body=body
            )
            response: Dict[str, Any] = self._execute(request)
            return Policy.from_response(response)
        except gcp.api.ResponseError as error:
            # 409 Conflict signals a stale etag; translate it so callers
            # (and @handle_etag_conflict) can retry the read-modify-write.
            if error.status == 409:
                # https://cloud.google.com/iam/docs/policies#etag
                logger.debug(error)
                raise EtagConflict from error
            raise
    @handle_etag_conflict
    def add_service_account_iam_policy_binding(
        self, account: str, role: str, member: str
    ) -> None:
        """Add an IAM policy binding to an IAM service account.
        See for details on updating policy bindings:
        https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts/setIamPolicy
        """
        policy: Policy = self.get_service_account_iam_policy(account)
        binding: Optional[Policy.Binding] = policy.find_binding_for_role(role)
        # Idempotent: skip the write when the member is already bound.
        if binding and member in binding.members:
            logger.debug(
                "Member %s already has role %s for Service Account %s",
                member,
                role,
                account,
            )
            return
        if binding is None:
            updated_binding = Policy.Binding(role, frozenset([member]))
        else:
            updated_members: FrozenSet[str] = binding.members.union({member})
            updated_binding: Policy.Binding = (
                dataclasses.replace(  # pylint: disable=too-many-function-args
                    binding, members=updated_members
                )
            )
        # When binding is None, _replace_binding's discard of None is a no-op,
        # so this simply adds updated_binding.
        updated_policy: Policy = _replace_binding(
            policy, binding, updated_binding
        )
        self.set_service_account_iam_policy(account, updated_policy)
        logger.debug(
            "Role %s granted to member %s for Service Account %s",
            role,
            member,
            account,
        )
    @handle_etag_conflict
    def remove_service_account_iam_policy_binding(
        self, account: str, role: str, member: str
    ) -> None:
        """Remove an IAM policy binding from the IAM policy of a service
        account.
        See for details on updating policy bindings:
        https://cloud.google.com/iam/docs/reference/rest/v1/projects.serviceAccounts/setIamPolicy
        """
        policy: Policy = self.get_service_account_iam_policy(account)
        binding: Optional[Policy.Binding] = policy.find_binding_for_role(role)
        # Idempotent: nothing to do when the role or member isn't bound.
        if binding is None:
            logger.debug(
                "Noop: Service Account %s has no bindings for role %s",
                account,
                role,
            )
            return
        if member not in binding.members:
            logger.debug(
                "Noop: Service Account %s binding for role %s has no member %s",
                account,
                role,
                member,
            )
            return
        updated_members: FrozenSet[str] = binding.members.difference({member})
        updated_binding: Policy.Binding = (
            dataclasses.replace(  # pylint: disable=too-many-function-args
                binding, members=updated_members
            )
        )
        updated_policy: Policy = _replace_binding(
            policy, binding, updated_binding
        )
        self.set_service_account_iam_policy(account, updated_policy)
        logger.debug(
            "Role %s revoked from member %s for Service Account %s",
            role,
            member,
            account,
        )
| 12,310
| 33.008287
| 97
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/infrastructure/gcp/network_security.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import dataclasses
import logging
from typing import Any, Dict
from google.rpc import code_pb2
import tenacity
from framework.infrastructure import gcp
logger = logging.getLogger(__name__)
# Type aliases
GcpResource = gcp.compute.ComputeV1.GcpResource
@dataclasses.dataclass(frozen=True)
class ServerTlsPolicy:
    """Immutable view of a GCP ServerTlsPolicy resource."""

    url: str
    name: str
    server_certificate: dict
    mtls_policy: dict
    update_time: str
    create_time: str

    @classmethod
    def from_response(
        cls, name: str, response: Dict[str, Any]
    ) -> "ServerTlsPolicy":
        """Build from the API response; `name` is the short resource name."""
        return cls(
            name=name,
            url=response["name"],
            create_time=response["createTime"],
            update_time=response["updateTime"],
            # Optional sub-messages default to empty.
            server_certificate=response.get("serverCertificate", {}),
            mtls_policy=response.get("mtlsPolicy", {}),
        )
@dataclasses.dataclass(frozen=True)
class ClientTlsPolicy:
    """Immutable view of a GCP ClientTlsPolicy resource."""

    url: str
    name: str
    client_certificate: dict
    server_validation_ca: list
    update_time: str
    create_time: str

    @classmethod
    def from_response(
        cls, name: str, response: Dict[str, Any]
    ) -> "ClientTlsPolicy":
        """Build from the API response; `name` is the short resource name."""
        return cls(
            name=name,
            url=response["name"],
            create_time=response["createTime"],
            update_time=response["updateTime"],
            # Optional sub-messages default to empty.
            client_certificate=response.get("clientCertificate", {}),
            server_validation_ca=response.get("serverValidationCa", []),
        )
@dataclasses.dataclass(frozen=True)
class AuthorizationPolicy:
    """Immutable view of a GCP AuthorizationPolicy resource."""

    url: str
    name: str
    update_time: str
    create_time: str
    action: str
    rules: list

    @classmethod
    def from_response(
        cls, name: str, response: Dict[str, Any]
    ) -> "AuthorizationPolicy":
        """Build from the API response; `name` is the short resource name."""
        return cls(
            name=name,
            url=response["name"],
            create_time=response["createTime"],
            update_time=response["updateTime"],
            action=response["action"],
            # Rules are optional; default to no rules.
            rules=response.get("rules", []),
        )
class _NetworkSecurityBase(
    gcp.api.GcpStandardCloudApiResource, metaclass=abc.ABCMeta
):
    """Base class for NetworkSecurity APIs."""
    # TODO(https://github.com/grpc/grpc/issues/29532) remove pylint disable
    # pylint: disable=abstract-method
    def __init__(self, api_manager: gcp.api.GcpApiManager, project: str):
        super().__init__(api_manager.networksecurity(self.api_version), project)
        # Shortcut to projects/*/locations/ endpoints
        self._api_locations = self.api.projects().locations()
    @property
    def api_name(self) -> str:
        # Service name used by the parent class when composing resource paths.
        return "networksecurity"
    def _execute(
        self, *args, **kwargs
    ):  # pylint: disable=signature-differs,arguments-differ
        # Workaround TD bug: throttled operations are reported as internal.
        # Ref b/175345578
        # Retry every 10s for up to 5 minutes while the error matches.
        retryer = tenacity.Retrying(
            retry=tenacity.retry_if_exception(self._operation_internal_error),
            wait=tenacity.wait_fixed(10),
            stop=tenacity.stop_after_delay(5 * 60),
            before_sleep=tenacity.before_sleep_log(logger, logging.DEBUG),
            reraise=True,
        )
        # NOTE(review): the retryer's result is discarded, so this override
        # always returns None even if the parent _execute returns a value.
        # Presumably the parent here is the long-running-operation variant
        # with no meaningful return — confirm before relying on a result.
        retryer(super()._execute, *args, **kwargs)
    @staticmethod
    def _operation_internal_error(exception):
        # True only for OperationError carrying google.rpc code INTERNAL —
        # the throttling symptom described in the workaround above.
        return (
            isinstance(exception, gcp.api.OperationError)
            and exception.error.code == code_pb2.INTERNAL
        )
class NetworkSecurityV1Beta1(_NetworkSecurityBase):
    """NetworkSecurity API v1beta1.

    Thin CRUD wrappers over the serverTlsPolicies, clientTlsPolicies and
    authorizationPolicies collections.
    """

    # Collection names used when composing full resource names.
    SERVER_TLS_POLICIES = "serverTlsPolicies"
    CLIENT_TLS_POLICIES = "clientTlsPolicies"
    AUTHZ_POLICIES = "authorizationPolicies"

    @property
    def api_version(self) -> str:
        return "v1beta1"

    def create_server_tls_policy(self, name: str, body: dict) -> GcpResource:
        """Create a ServerTlsPolicy with the given short name."""
        return self._create_resource(
            collection=self._api_locations.serverTlsPolicies(),
            body=body,
            serverTlsPolicyId=name,
        )

    def get_server_tls_policy(self, name: str) -> ServerTlsPolicy:
        """Fetch a ServerTlsPolicy by short name."""
        response = self._get_resource(
            collection=self._api_locations.serverTlsPolicies(),
            full_name=self.resource_full_name(name, self.SERVER_TLS_POLICIES),
        )
        return ServerTlsPolicy.from_response(name, response)

    def delete_server_tls_policy(self, name: str) -> bool:
        """Delete a ServerTlsPolicy by short name."""
        return self._delete_resource(
            collection=self._api_locations.serverTlsPolicies(),
            full_name=self.resource_full_name(name, self.SERVER_TLS_POLICIES),
        )

    def create_client_tls_policy(self, name: str, body: dict) -> GcpResource:
        """Create a ClientTlsPolicy with the given short name."""
        return self._create_resource(
            collection=self._api_locations.clientTlsPolicies(),
            body=body,
            clientTlsPolicyId=name,
        )

    def get_client_tls_policy(self, name: str) -> ClientTlsPolicy:
        """Fetch a ClientTlsPolicy by short name."""
        response = self._get_resource(
            collection=self._api_locations.clientTlsPolicies(),
            full_name=self.resource_full_name(name, self.CLIENT_TLS_POLICIES),
        )
        return ClientTlsPolicy.from_response(name, response)

    def delete_client_tls_policy(self, name: str) -> bool:
        """Delete a ClientTlsPolicy by short name."""
        return self._delete_resource(
            collection=self._api_locations.clientTlsPolicies(),
            full_name=self.resource_full_name(name, self.CLIENT_TLS_POLICIES),
        )

    def create_authz_policy(self, name: str, body: dict) -> GcpResource:
        """Create an AuthorizationPolicy with the given short name."""
        return self._create_resource(
            collection=self._api_locations.authorizationPolicies(),
            body=body,
            authorizationPolicyId=name,
        )

    def get_authz_policy(self, name: str) -> AuthorizationPolicy:
        """Fetch an AuthorizationPolicy by short name."""
        response = self._get_resource(
            collection=self._api_locations.authorizationPolicies(),
            full_name=self.resource_full_name(name, self.AUTHZ_POLICIES),
        )
        # Bug fix: previously parsed with ClientTlsPolicy.from_response
        # (copy-paste from the method above), which silently dropped the
        # "action" and "rules" fields of the authorization policy.
        return AuthorizationPolicy.from_response(name, response)

    def delete_authz_policy(self, name: str) -> bool:
        """Delete an AuthorizationPolicy by short name."""
        return self._delete_resource(
            collection=self._api_locations.authorizationPolicies(),
            full_name=self.resource_full_name(name, self.AUTHZ_POLICIES),
        )
class NetworkSecurityV1Alpha1(NetworkSecurityV1Beta1):
    """NetworkSecurity API v1alpha1.

    Extending the v1beta1 class assumes that v1beta1 is simply v1alpha1
    graduated into a more stable version — true in most cases. Methods
    that diverge between the two versions can be overridden here.
    """

    @property
    def api_version(self) -> str:
        """API version string used to build the discovery client."""
        return "v1alpha1"
| 7,218
| 31.518018
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/infrastructure/gcp/api.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import contextlib
import functools
import json
import logging
from typing import Any, Dict, List, Optional
from absl import flags
from google.cloud import secretmanager_v1
from google.longrunning import operations_pb2
from google.protobuf import json_format
from google.rpc import code_pb2
from google.rpc import error_details_pb2
from google.rpc import status_pb2
from googleapiclient import discovery
import googleapiclient.errors
import googleapiclient.http
import tenacity
import yaml
import framework.helpers.highlighter
logger = logging.getLogger(__name__)
# Absl flags overriding how API clients are constructed. Each flag is read
# lazily by GcpApiManager when no explicit constructor argument is given.
PRIVATE_API_KEY_SECRET_NAME = flags.DEFINE_string(
    "private_api_key_secret_name",
    default=None,
    help=(
        "Load Private API access key from the latest version of the secret "
        "with the given name, in the format projects/*/secrets/*"
    ),
)
V1_DISCOVERY_URI = flags.DEFINE_string(
    "v1_discovery_uri",
    default=discovery.V1_DISCOVERY_URI,
    help="Override v1 Discovery URI",
)
V2_DISCOVERY_URI = flags.DEFINE_string(
    "v2_discovery_uri",
    default=discovery.V2_DISCOVERY_URI,
    help="Override v2 Discovery URI",
)
COMPUTE_V1_DISCOVERY_FILE = flags.DEFINE_string(
    "compute_v1_discovery_file",
    default=None,
    help="Load compute v1 from discovery file",
)
GCP_UI_URL = flags.DEFINE_string(
    "gcp_ui_url",
    default="console.cloud.google.com",
    help="Override GCP UI URL.",
)
# Type aliases
_HttpError = googleapiclient.errors.HttpError
_HttpLib2Error = googleapiclient.http.httplib2.HttpLib2Error
_HighlighterYaml = framework.helpers.highlighter.HighlighterYaml
Operation = operations_pb2.Operation
HttpRequest = googleapiclient.http.HttpRequest
class GcpApiManager:
    """Factory and cache for the GCP API clients used by the framework.

    Clients are built lazily from discovery documents and memoized per
    (api, version); close() releases every client created here.
    """
    def __init__(
        self,
        *,
        v1_discovery_uri=None,
        v2_discovery_uri=None,
        compute_v1_discovery_file=None,
        private_api_key_secret_name=None,
        gcp_ui_url=None,
    ):
        # Explicit constructor arguments win; otherwise fall back to the
        # module-level absl flags.
        self.v1_discovery_uri = v1_discovery_uri or V1_DISCOVERY_URI.value
        self.v2_discovery_uri = v2_discovery_uri or V2_DISCOVERY_URI.value
        self.compute_v1_discovery_file = (
            compute_v1_discovery_file or COMPUTE_V1_DISCOVERY_FILE.value
        )
        self.private_api_key_secret_name = (
            private_api_key_secret_name or PRIVATE_API_KEY_SECRET_NAME.value
        )
        self.gcp_ui_url = gcp_ui_url or GCP_UI_URL.value
        # TODO(sergiitk): add options to pass google Credentials
        # Tracks every discovery client so close() can release them all.
        self._exit_stack = contextlib.ExitStack()
    def close(self):
        """Release all API clients created through this manager."""
        self._exit_stack.close()
    # NOTE(review): lru_cache on instance methods/properties keys on `self`
    # and keeps this manager alive for the process lifetime (ruff B019) —
    # presumably acceptable because the manager is a long-lived singleton;
    # confirm before creating many short-lived instances.
    @property
    @functools.lru_cache(None)
    def private_api_key(self):
        """
        Private API key.
        Return API key credential that identifies a GCP project allow-listed for
        accessing private API discovery documents.
        https://console.cloud.google.com/apis/credentials
        This method lazy-loads the content of the key from the Secret Manager.
        https://console.cloud.google.com/security/secret-manager
        """
        if not self.private_api_key_secret_name:
            raise ValueError(
                "private_api_key_secret_name must be set to "
                "access private_api_key."
            )
        secrets_api = self.secrets("v1")
        version_resource_path = secrets_api.secret_version_path(
            **secrets_api.parse_secret_path(self.private_api_key_secret_name),
            secret_version="latest",
        )
        secret: secretmanager_v1.AccessSecretVersionResponse
        secret = secrets_api.access_secret_version(name=version_resource_path)
        return secret.payload.data.decode()
    @functools.lru_cache(None)
    def compute(self, version):
        """Compute Engine API client; supports 'v1' and 'v1alpha'."""
        api_name = "compute"
        if version == "v1":
            # A local discovery file, when configured, takes precedence.
            if self.compute_v1_discovery_file:
                return self._build_from_file(self.compute_v1_discovery_file)
            else:
                return self._build_from_discovery_v1(api_name, version)
        elif version == "v1alpha":
            return self._build_from_discovery_v1(api_name, "alpha")
        raise NotImplementedError(f"Compute {version} not supported")
    @functools.lru_cache(None)
    def networksecurity(self, version):
        """Network Security API client; alpha requires the private API key."""
        api_name = "networksecurity"
        if version == "v1alpha1":
            return self._build_from_discovery_v2(
                api_name,
                version,
                api_key=self.private_api_key,
                visibility_labels=["NETWORKSECURITY_ALPHA"],
            )
        elif version == "v1beta1":
            return self._build_from_discovery_v2(api_name, version)
        raise NotImplementedError(f"Network Security {version} not supported")
    @functools.lru_cache(None)
    def networkservices(self, version):
        """Network Services API client; alpha requires the private API key."""
        api_name = "networkservices"
        if version == "v1alpha1":
            return self._build_from_discovery_v2(
                api_name,
                version,
                api_key=self.private_api_key,
                visibility_labels=["NETWORKSERVICES_ALPHA"],
            )
        elif version == "v1beta1":
            return self._build_from_discovery_v2(api_name, version)
        raise NotImplementedError(f"Network Services {version} not supported")
    @staticmethod
    @functools.lru_cache(None)
    def secrets(version: str):
        """Secret Manager client (gRPC-based, not discovery-based)."""
        if version == "v1":
            return secretmanager_v1.SecretManagerServiceClient()
        raise NotImplementedError(f"Secret Manager {version} not supported")
    @functools.lru_cache(None)
    def iam(self, version: str) -> discovery.Resource:
        """Identity and Access Management (IAM) API.
        https://cloud.google.com/iam/docs/reference/rest
        https://googleapis.github.io/google-api-python-client/docs/dyn/iam_v1.html
        """
        api_name = "iam"
        if version == "v1":
            return self._build_from_discovery_v1(api_name, version)
        raise NotImplementedError(
            f"Identity and Access Management (IAM) {version} not supported"
        )
    def _build_from_discovery_v1(self, api_name, version):
        # Build a client from the public v1 discovery service and register
        # it for cleanup on close().
        api = discovery.build(
            api_name,
            version,
            cache_discovery=False,
            discoveryServiceUrl=self.v1_discovery_uri,
        )
        self._exit_stack.enter_context(api)
        return api
    def _build_from_discovery_v2(
        self,
        api_name,
        version,
        *,
        api_key: Optional[str] = None,
        visibility_labels: Optional[List] = None,
    ):
        # Build a client from the v2 discovery service; api_key and
        # visibility labels gate access to private/alpha surfaces.
        params = {}
        if api_key:
            params["key"] = api_key
        if visibility_labels:
            # Dash-separated list of labels.
            params["labels"] = "_".join(visibility_labels)
        params_str = ""
        if params:
            params_str = "&" + "&".join(f"{k}={v}" for k, v in params.items())
        api = discovery.build(
            api_name,
            version,
            cache_discovery=False,
            discoveryServiceUrl=f"{self.v2_discovery_uri}{params_str}",
        )
        self._exit_stack.enter_context(api)
        return api
    def _build_from_file(self, discovery_file):
        # Build a client from a local discovery document on disk.
        with open(discovery_file, "r") as f:
            api = discovery.build_from_document(f.read())
        self._exit_stack.enter_context(api)
        return api
class Error(Exception):
    """Root of the GCP API error hierarchy for this package."""
class ResponseError(Error):
    """Raised when the HTTP response has a non-2xx status."""

    reason: str
    uri: str
    error_details: Optional[str]
    status: Optional[int]
    cause: _HttpError

    def __init__(self, cause: _HttpError):
        # TODO(sergiitk): once googleapiclient is upgraded, switch to the
        # public status_code() accessor, drop the private _get_reason()
        # call, and remove the error_details ordering note below.
        self.reason = cause._get_reason().strip()  # noqa
        self.uri = cause.uri
        # NOTE: reading error_details is only valid after _get_reason().
        self.error_details = cause.error_details
        self.status = (
            cause.resp.status if cause.resp and cause.resp.status else None
        )
        self.cause = cause
        super().__init__()

    def __repr__(self):
        return (
            f"<ResponseError {self.status} when requesting {self.uri} "
            f'returned "{self.reason}". Details: "{self.error_details}">'
        )
class TransportError(Error):
    """Raised when a lower-level HTTP transport error occurs."""

    cause: _HttpLib2Error

    def __init__(self, cause: _HttpLib2Error):
        self.cause = cause
        super().__init__()

    def __repr__(self):
        return f"<TransportError cause: {self.cause!r}>"
class OperationError(Error):
    """
    Operation was not successful.
    Assuming Operation based on Google API Style Guide:
    https://cloud.google.com/apis/design/design_patterns#long_running_operations
    https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto
    """
    # API that produced the failed operation, e.g. "compute".
    api_name: str
    # Operation name reported by the API, or "unknown".
    name: str
    # Raw operation metadata dict (API-specific Any, not parsed).
    metadata: Any
    # Symbolic name of the google.rpc error code, e.g. "INTERNAL".
    code_name: code_pb2.Code
    # Parsed google.rpc.Status carried by the failed operation.
    error: status_pb2.Status
    def __init__(self, api_name: str, response: dict):
        self.api_name = api_name
        # Operation.metadata field is Any specific to the API. It may not be
        # present in the default descriptor pool, and that's expected.
        # To avoid json_format.ParseError, handle it separately.
        # NOTE(review): pop() mutates the caller's response dict — confirm
        # callers don't reuse it afterwards.
        self.metadata = response.pop("metadata", {})
        # Must be after removing metadata field.
        operation: Operation = self._parse_operation_response(response)
        self.name = operation.name or "unknown"
        self.code_name = code_pb2.Code.Name(operation.error.code)
        self.error = operation.error
        super().__init__()
    @staticmethod
    def _parse_operation_response(operation_response: dict) -> Operation:
        """Parse the response dict into an Operation proto, best-effort."""
        try:
            # error_details_pb2's descriptor pool resolves common google.rpc
            # detail types embedded in the operation error.
            return json_format.ParseDict(
                operation_response,
                Operation(),
                ignore_unknown_fields=True,
                descriptor_pool=error_details_pb2.DESCRIPTOR.pool,
            )
        except (json_format.Error, TypeError) as e:
            # Swallow parsing errors if any. Building correct OperationError()
            # is more important than losing debug information. Details still
            # can be extracted from the warning.
            logger.warning(
                (
                    "Can't parse response while processing OperationError:"
                    " '%r', error %r"
                ),
                operation_response,
                e,
            )
            return Operation()
    def __str__(self):
        """Multi-line human-readable description of the failed operation."""
        indent_l1 = " " * 2
        indent_l2 = indent_l1 * 2
        result = (
            f'{self.api_name} operation "{self.name}" failed.\n'
            f"{indent_l1}code: {self.error.code} ({self.code_name})\n"
            f'{indent_l1}message: "{self.error.message}"'
        )
        if self.error.details:
            # Render each google.rpc detail message as indented JSON.
            result += f"\n{indent_l1}details: [\n"
            for any_error in self.error.details:
                error_str = json_format.MessageToJson(any_error)
                for line in error_str.splitlines():
                    result += indent_l2 + line + "\n"
            result += f"{indent_l1}]"
        if self.metadata:
            result += f"\n metadata: \n"
            metadata_str = json.dumps(self.metadata, indent=2)
            for line in metadata_str.splitlines():
                result += indent_l2 + line + "\n"
        result = result.rstrip()
        return result
class GcpProjectApiResource:
    """Base helper for project-scoped GCP APIs built on the discovery client."""

    # TODO(sergiitk): move someplace better
    _WAIT_FOR_OPERATION_SEC = 60 * 10
    _WAIT_FIXED_SEC = 2
    _GCP_API_RETRIES = 5

    def __init__(self, api: discovery.Resource, project: str):
        self.api: discovery.Resource = api
        self.project: str = project
        # Used to colorize YAML dumps of resource bodies in logs.
        self._highlighter = _HighlighterYaml()

    # TODO(sergiitk): in upcoming GCP refactoring, differentiate between
    # _execute for LRO (Long Running Operations), and immediate operations.
    def _execute(
        self,
        request: HttpRequest,
        *,
        num_retries: Optional[int] = _GCP_API_RETRIES,
    ) -> Dict[str, Any]:
        """Execute the immediate request.

        Returns:
            Unmarshalled response as a dictionary.

        Raises:
            ResponseError if the response was not a 2xx.
            TransportError if a transport error has occurred.
        """
        # Treat explicit None the same as the default retry count.
        if num_retries is None:
            num_retries = self._GCP_API_RETRIES
        try:
            return request.execute(num_retries=num_retries)
        except _HttpError as error:
            raise ResponseError(error)
        except _HttpLib2Error as error:
            raise TransportError(error)

    def resource_pretty_format(self, body: dict) -> str:
        """Return a string with pretty-printed resource body."""
        yaml_out: str = yaml.dump(body, explicit_start=True, explicit_end=True)
        return self._highlighter.highlight(yaml_out)

    @staticmethod
    def wait_for_operation(
        operation_request,
        test_success_fn,
        timeout_sec=_WAIT_FOR_OPERATION_SEC,
        wait_sec=_WAIT_FIXED_SEC,
    ):
        """Poll operation_request until test_success_fn(result) is truthy.

        Retries every wait_sec seconds, both on exceptions and on
        not-yet-successful results, up to timeout_sec; then re-raises or
        returns the last outcome (reraise=True).
        """
        retryer = tenacity.Retrying(
            retry=(
                tenacity.retry_if_not_result(test_success_fn)
                | tenacity.retry_if_exception_type()
            ),
            wait=tenacity.wait_fixed(wait_sec),
            stop=tenacity.stop_after_delay(timeout_sec),
            after=tenacity.after_log(logger, logging.DEBUG),
            reraise=True,
        )
        return retryer(operation_request.execute)
class GcpStandardCloudApiResource(GcpProjectApiResource, metaclass=abc.ABCMeta):
    """Base for APIs following the standard Cloud API resource layout.

    Resources live under projects/*/locations/*, and mutating calls return
    long-running operations polled via projects.locations.operations.
    """

    GLOBAL_LOCATION = "global"

    def parent(self, location: Optional[str] = GLOBAL_LOCATION):
        """Return the parent path: projects/{project}/locations/{location}."""
        if location is None:
            location = self.GLOBAL_LOCATION
        return f"projects/{self.project}/locations/{location}"

    def resource_full_name(self, name, collection_name):
        """Return the full resource name under the global-location parent."""
        return f"{self.parent()}/{collection_name}/{name}"

    def _create_resource(
        self, collection: discovery.Resource, body: dict, **kwargs
    ):
        """Create a resource and block until its operation completes.

        NOTE(review): returns None (_execute has no return) — confirm
        callers annotated as returning GcpResource don't rely on a value.
        """
        logger.info(
            "Creating %s resource:\n%s",
            self.api_name,
            self.resource_pretty_format(body),
        )
        create_req = collection.create(
            parent=self.parent(), body=body, **kwargs
        )
        self._execute(create_req)

    @property
    @abc.abstractmethod
    def api_name(self) -> str:
        # Short API name for logs/errors, e.g. "networkservices".
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def api_version(self) -> str:
        # API version string, e.g. "v1beta1".
        raise NotImplementedError

    def _get_resource(self, collection: discovery.Resource, full_name):
        """Fetch a resource by its full name and log its body."""
        resource = collection.get(name=full_name).execute()
        logger.info(
            "Loaded %s:\n%s", full_name, self.resource_pretty_format(resource)
        )
        return resource

    def _delete_resource(
        self, collection: discovery.Resource, full_name: str
    ) -> bool:
        """Delete a resource; return True on success, False otherwise.

        A 404 is treated as already-deleted; other HTTP errors are logged
        and swallowed (best-effort cleanup).
        """
        logger.debug("Deleting %s", full_name)
        try:
            self._execute(collection.delete(name=full_name))
            return True
        except _HttpError as error:
            if error.resp and error.resp.status == 404:
                logger.info("%s not deleted since it does not exist", full_name)
            else:
                logger.warning("Failed to delete %s, %r", full_name, error)
        return False

    # TODO(sergiitk): Use ResponseError and TransportError
    def _execute(  # pylint: disable=arguments-differ
        self,
        request: HttpRequest,
        timeout_sec: int = GcpProjectApiResource._WAIT_FOR_OPERATION_SEC,
    ):
        """Execute a request that starts an LRO; wait for its completion."""
        operation = request.execute(num_retries=self._GCP_API_RETRIES)
        logger.debug("Operation %s", operation)
        self._wait(operation["name"], timeout_sec)

    def _wait(
        self,
        operation_id: str,
        timeout_sec: int = GcpProjectApiResource._WAIT_FOR_OPERATION_SEC,
    ):
        """Poll the operation until done; raise OperationError on failure."""
        logger.info(
            "Waiting %s sec for %s operation id: %s",
            timeout_sec,
            self.api_name,
            operation_id,
        )
        op_request = (
            self.api.projects().locations().operations().get(name=operation_id)
        )
        operation = self.wait_for_operation(
            operation_request=op_request,
            test_success_fn=lambda result: result["done"],
            timeout_sec=timeout_sec,
        )
        logger.debug("Completed operation: %s", operation)
        if "error" in operation:
            raise OperationError(self.api_name, operation)
| 17,176
| 32.224371
| 92
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/infrastructure/gcp/__init__.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from framework.infrastructure.gcp import api
from framework.infrastructure.gcp import compute
from framework.infrastructure.gcp import iam
from framework.infrastructure.gcp import network_security
from framework.infrastructure.gcp import network_services
| 832
| 42.842105
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/infrastructure/gcp/compute.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import datetime
import enum
import logging
from typing import Any, Dict, List, Optional, Set
from googleapiclient import discovery
import googleapiclient.errors
from framework.helpers import retryers
from framework.infrastructure import gcp
logger = logging.getLogger(__name__)
class ComputeV1(
    gcp.api.GcpProjectApiResource
):  # pylint: disable=too-many-public-methods
    """Thin wrapper over the GCP Compute Engine v1 REST API.

    Fixes over the previous revision:
      - _retry_backends_health no longer mutates the ``pending`` set while
        iterating over it (raised RuntimeError the first time a backend
        turned healthy).
      - _exists_resource now returns a strict bool, matching its annotation.
    """

    # TODO(sergiitk): move someplace better
    _WAIT_FOR_BACKEND_SEC = 60 * 10
    _WAIT_FOR_BACKEND_SLEEP_SEC = 4
    _WAIT_FOR_OPERATION_SEC = 60 * 10

    @dataclasses.dataclass(frozen=True)
    class GcpResource:
        # Short resource name and its full selfLink URL.
        name: str
        url: str

    @dataclasses.dataclass(frozen=True)
    class ZonalGcpResource(GcpResource):
        # Zone the resource lives in, e.g. "us-central1-a".
        zone: str

    def __init__(
        self,
        api_manager: gcp.api.GcpApiManager,
        project: str,
        version: str = "v1",
    ):
        """Init with a discovery client for the given compute API version."""
        super().__init__(api_manager.compute(version), project)

    class HealthCheckProtocol(enum.Enum):
        TCP = enum.auto()
        GRPC = enum.auto()

    class BackendServiceProtocol(enum.Enum):
        HTTP2 = enum.auto()
        GRPC = enum.auto()

    def create_health_check(
        self,
        name: str,
        protocol: HealthCheckProtocol,
        *,
        port: Optional[int] = None,
    ) -> "GcpResource":
        """Create a TCP or GRPC health check.

        When port is None the health check uses the serving port; otherwise
        the given fixed port.

        Raises:
            TypeError: on an unsupported protocol.
        """
        if protocol is self.HealthCheckProtocol.TCP:
            health_check_field = "tcpHealthCheck"
        elif protocol is self.HealthCheckProtocol.GRPC:
            health_check_field = "grpcHealthCheck"
        else:
            raise TypeError(f"Unexpected Health Check protocol: {protocol}")
        health_check_settings = {}
        if port is None:
            health_check_settings["portSpecification"] = "USE_SERVING_PORT"
        else:
            health_check_settings["portSpecification"] = "USE_FIXED_PORT"
            health_check_settings["port"] = port
        return self._insert_resource(
            self.api.healthChecks(),
            {
                "name": name,
                "type": protocol.name,
                health_check_field: health_check_settings,
            },
        )

    def list_health_check(self):
        """Return the raw list response of all health checks in the project."""
        return self._list_resource(self.api.healthChecks())

    def delete_health_check(self, name):
        """Delete a health check by name; a missing resource is tolerated."""
        self._delete_resource(self.api.healthChecks(), "healthCheck", name)

    def create_firewall_rule(
        self,
        name: str,
        network_url: str,
        source_ranges: List[str],
        ports: List[str],
    ) -> Optional["GcpResource"]:
        """Create an INGRESS allow-tcp firewall rule for health checks.

        Returns None when a rule with the same name already exists (409).
        """
        try:
            return self._insert_resource(
                self.api.firewalls(),
                {
                    "allowed": [{"IPProtocol": "tcp", "ports": ports}],
                    "direction": "INGRESS",
                    "name": name,
                    "network": network_url,
                    "priority": 1000,
                    "sourceRanges": source_ranges,
                    "targetTags": ["allow-health-checks"],
                },
            )
        except googleapiclient.errors.HttpError as http_error:
            # TODO(lidiz) use status_code() when we upgrade googleapiclient
            if http_error.resp.status == 409:
                logger.debug("Firewall rule %s already existed", name)
                return None
            else:
                raise

    def delete_firewall_rule(self, name):
        """Delete a firewall rule by name; a missing resource is tolerated."""
        self._delete_resource(self.api.firewalls(), "firewall", name)

    def create_backend_service_traffic_director(
        self,
        name: str,
        health_check: "GcpResource",
        affinity_header: Optional[str] = None,
        protocol: Optional[BackendServiceProtocol] = None,
        subset_size: Optional[int] = None,
        locality_lb_policies: Optional[List[dict]] = None,
        outlier_detection: Optional[dict] = None,
    ) -> "GcpResource":
        """Create an INTERNAL_SELF_MANAGED (Traffic Director) backend service.

        Raises:
            TypeError: when protocol is not a BackendServiceProtocol.
                NOTE(review): protocol is typed Optional, but None is
                rejected here — the default appears unusable; confirm.
        """
        if not isinstance(protocol, self.BackendServiceProtocol):
            raise TypeError(f"Unexpected Backend Service protocol: {protocol}")
        body = {
            "name": name,
            "loadBalancingScheme": "INTERNAL_SELF_MANAGED",  # Traffic Director
            "healthChecks": [health_check.url],
            "protocol": protocol.name,
        }
        # If affinity header is specified, config the backend service to support
        # affinity, and set affinity header to the one given.
        if affinity_header:
            body["sessionAffinity"] = "HEADER_FIELD"
            body["localityLbPolicy"] = "RING_HASH"
            body["consistentHash"] = {
                "httpHeaderName": affinity_header,
            }
        if subset_size:
            body["subsetting"] = {
                "policy": "CONSISTENT_HASH_SUBSETTING",
                "subsetSize": subset_size,
            }
        if locality_lb_policies:
            body["localityLbPolicies"] = locality_lb_policies
        if outlier_detection:
            body["outlierDetection"] = outlier_detection
        return self._insert_resource(self.api.backendServices(), body)

    def get_backend_service_traffic_director(self, name: str) -> "GcpResource":
        """Fetch a backend service by name."""
        return self._get_resource(
            self.api.backendServices(), backendService=name
        )

    def patch_backend_service(self, backend_service, body, **kwargs):
        """Patch a backend service with an arbitrary body."""
        self._patch_resource(
            collection=self.api.backendServices(),
            backendService=backend_service.name,
            body=body,
            **kwargs,
        )

    def backend_service_patch_backends(
        self,
        backend_service,
        backends,
        max_rate_per_endpoint: Optional[int] = None,
    ):
        """Replace the backend service's backends with RATE-balanced NEGs."""
        if max_rate_per_endpoint is None:
            max_rate_per_endpoint = 5
        backend_list = [
            {
                "group": backend.url,
                "balancingMode": "RATE",
                "maxRatePerEndpoint": max_rate_per_endpoint,
            }
            for backend in backends
        ]
        self._patch_resource(
            collection=self.api.backendServices(),
            body={"backends": backend_list},
            backendService=backend_service.name,
        )

    def backend_service_remove_all_backends(self, backend_service):
        """Detach every backend from the backend service."""
        self._patch_resource(
            collection=self.api.backendServices(),
            body={"backends": []},
            backendService=backend_service.name,
        )

    def delete_backend_service(self, name):
        """Delete a backend service by name; missing resources tolerated."""
        self._delete_resource(
            self.api.backendServices(), "backendService", name
        )

    def create_url_map(
        self,
        name: str,
        matcher_name: str,
        src_hosts,
        dst_default_backend_service: "GcpResource",
        dst_host_rule_match_backend_service: Optional["GcpResource"] = None,
    ) -> "GcpResource":
        """Create a URL map routing src_hosts to the given backend services.

        The path matcher defaults to the default backend service when
        dst_host_rule_match_backend_service is omitted.
        """
        if dst_host_rule_match_backend_service is None:
            dst_host_rule_match_backend_service = dst_default_backend_service
        return self._insert_resource(
            self.api.urlMaps(),
            {
                "name": name,
                "defaultService": dst_default_backend_service.url,
                "hostRules": [
                    {
                        "hosts": src_hosts,
                        "pathMatcher": matcher_name,
                    }
                ],
                "pathMatchers": [
                    {
                        "name": matcher_name,
                        "defaultService": dst_host_rule_match_backend_service.url,
                    }
                ],
            },
        )

    def create_url_map_with_content(self, url_map_body: Any) -> "GcpResource":
        """Create a URL map from a caller-provided resource body."""
        return self._insert_resource(self.api.urlMaps(), url_map_body)

    def patch_url_map(self, url_map: "GcpResource", body, **kwargs):
        """Patch a URL map with an arbitrary body."""
        self._patch_resource(
            collection=self.api.urlMaps(),
            urlMap=url_map.name,
            body=body,
            **kwargs,
        )

    def delete_url_map(self, name):
        """Delete a URL map by name; missing resources tolerated."""
        self._delete_resource(self.api.urlMaps(), "urlMap", name)

    def create_target_grpc_proxy(
        self,
        name: str,
        url_map: "GcpResource",
        validate_for_proxyless: bool = True,
    ) -> "GcpResource":
        """Create a target gRPC proxy pointing to the URL map."""
        return self._insert_resource(
            self.api.targetGrpcProxies(),
            {
                "name": name,
                "url_map": url_map.url,
                "validate_for_proxyless": validate_for_proxyless,
            },
        )

    def delete_target_grpc_proxy(self, name):
        """Delete a target gRPC proxy by name; missing resources tolerated."""
        self._delete_resource(
            self.api.targetGrpcProxies(), "targetGrpcProxy", name
        )

    def create_target_http_proxy(
        self,
        name: str,
        url_map: "GcpResource",
    ) -> "GcpResource":
        """Create a target HTTP proxy pointing to the URL map."""
        return self._insert_resource(
            self.api.targetHttpProxies(),
            {
                "name": name,
                "url_map": url_map.url,
            },
        )

    def delete_target_http_proxy(self, name):
        """Delete a target HTTP proxy by name; missing resources tolerated."""
        self._delete_resource(
            self.api.targetHttpProxies(), "targetHttpProxy", name
        )

    def create_forwarding_rule(
        self,
        name: str,
        src_port: int,
        target_proxy: "GcpResource",
        network_url: str,
        *,
        ip_address: str = "0.0.0.0",
    ) -> "GcpResource":
        """Create a global INTERNAL_SELF_MANAGED forwarding rule."""
        return self._insert_resource(
            self.api.globalForwardingRules(),
            {
                "name": name,
                "loadBalancingScheme": "INTERNAL_SELF_MANAGED",  # Traffic Director
                "portRange": src_port,
                "IPAddress": ip_address,
                "network": network_url,
                "target": target_proxy.url,
            },
        )

    def exists_forwarding_rule(self, src_port) -> bool:
        """Check whether a TD forwarding rule on 0.0.0.0:src_port exists."""
        # TODO(sergiitk): Better approach for confirming the port is available.
        #   It's possible a rule allocates actual port range, e.g 8000-9000,
        #   and this wouldn't catch it. For now, we assume there's no
        #   port ranges used in the project.
        filter_str = (
            f'(portRange eq "{src_port}-{src_port}") '
            '(IPAddress eq "0.0.0.0")'
            '(loadBalancingScheme eq "INTERNAL_SELF_MANAGED")'
        )
        return self._exists_resource(
            self.api.globalForwardingRules(), resource_filter=filter_str
        )

    def delete_forwarding_rule(self, name):
        """Delete a global forwarding rule; missing resources tolerated."""
        self._delete_resource(
            self.api.globalForwardingRules(), "forwardingRule", name
        )

    def wait_for_network_endpoint_group(
        self,
        name: str,
        zone: str,
        *,
        timeout_sec=_WAIT_FOR_BACKEND_SEC,
        wait_sec=_WAIT_FOR_BACKEND_SLEEP_SEC,
    ):
        """Wait until the NEG exists and reports at least one endpoint."""
        retryer = retryers.constant_retryer(
            wait_fixed=datetime.timedelta(seconds=wait_sec),
            timeout=datetime.timedelta(seconds=timeout_sec),
            check_result=lambda neg: neg and neg.get("size", 0) > 0,
        )
        network_endpoint_group = retryer(
            self._retry_network_endpoint_group_ready, name, zone
        )
        # TODO(sergiitk): dataclass
        return self.ZonalGcpResource(
            network_endpoint_group["name"],
            network_endpoint_group["selfLink"],
            zone,
        )

    def _retry_network_endpoint_group_ready(self, name: str, zone: str):
        """Single NEG load attempt; logs and re-raises HTTP errors."""
        try:
            neg = self.get_network_endpoint_group(name, zone)
            logger.debug(
                "Waiting for endpoints: NEG %s in zone %s, current count %s",
                neg["name"],
                zone,
                neg.get("size"),
            )
        except googleapiclient.errors.HttpError as error:
            # noinspection PyProtectedMember
            reason = error._get_reason()
            logger.debug(
                "Retrying NEG load, got %s, details %s",
                error.resp.status,
                reason,
            )
            raise
        return neg

    def get_network_endpoint_group(self, name, zone):
        """Fetch a zonal network endpoint group as a raw dict."""
        neg = (
            self.api.networkEndpointGroups()
            .get(project=self.project, networkEndpointGroup=name, zone=zone)
            .execute()
        )
        # TODO(sergiitk): dataclass
        return neg

    def wait_for_backends_healthy_status(
        self,
        backend_service: GcpResource,
        backends: Set[ZonalGcpResource],
        *,
        timeout_sec: int = _WAIT_FOR_BACKEND_SEC,
        wait_sec: int = _WAIT_FOR_BACKEND_SLEEP_SEC,
    ):
        """Poll until every backend of the service reports HEALTHY."""
        retryer = retryers.constant_retryer(
            wait_fixed=datetime.timedelta(seconds=wait_sec),
            timeout=datetime.timedelta(seconds=timeout_sec),
            check_result=lambda result: result,
        )
        # Work on a private copy so the caller's set is untouched.
        pending = set(backends)
        retryer(self._retry_backends_health, backend_service, pending)

    def _retry_backends_health(
        self, backend_service: GcpResource, pending: Set[ZonalGcpResource]
    ):
        """One health-poll pass; drops healthy backends from pending.

        Returns True when no backends remain pending.
        """
        # Iterate over a snapshot: removing from the set being iterated
        # raises "RuntimeError: Set changed size during iteration".
        for backend in list(pending):
            result = self.get_backend_service_backend_health(
                backend_service, backend
            )
            if "healthStatus" not in result:
                logger.debug(
                    "Waiting for instances: backend %s, zone %s",
                    backend.name,
                    backend.zone,
                )
                continue

            backend_healthy = True
            for instance in result["healthStatus"]:
                logger.debug(
                    "Backend %s in zone %s: instance %s:%s health: %s",
                    backend.name,
                    backend.zone,
                    instance["ipAddress"],
                    instance["port"],
                    instance["healthState"],
                )
                if instance["healthState"] != "HEALTHY":
                    backend_healthy = False

            if backend_healthy:
                logger.info(
                    "Backend %s in zone %s reported healthy",
                    backend.name,
                    backend.zone,
                )
                pending.remove(backend)
        return not pending

    def get_backend_service_backend_health(self, backend_service, backend):
        """Call backendServices.getHealth for one backend group."""
        return (
            self.api.backendServices()
            .getHealth(
                project=self.project,
                backendService=backend_service.name,
                body={"group": backend.url},
            )
            .execute()
        )

    def _get_resource(
        self, collection: discovery.Resource, **kwargs
    ) -> "GcpResource":
        """GET a resource and wrap (name, selfLink) into a GcpResource."""
        resp = collection.get(project=self.project, **kwargs).execute()
        logger.info(
            "Loaded compute resource:\n%s", self.resource_pretty_format(resp)
        )
        return self.GcpResource(resp["name"], resp["selfLink"])

    def _exists_resource(
        self, collection: discovery.Resource, resource_filter: str
    ) -> bool:
        """Return True when at least one resource matches the filter."""
        resp = collection.list(
            project=self.project, filter=resource_filter, maxResults=1
        ).execute(num_retries=self._GCP_API_RETRIES)
        if "kind" not in resp:
            # TODO(sergiitk): better error
            raise ValueError('List response "kind" is missing')
        # bool() so the annotated return type is honored (was a raw list).
        return bool("items" in resp and resp["items"])

    def _insert_resource(
        self, collection: discovery.Resource, body: Dict[str, Any]
    ) -> "GcpResource":
        """Insert a resource, wait for the operation, return a GcpResource."""
        logger.info(
            "Creating compute resource:\n%s", self.resource_pretty_format(body)
        )
        resp = self._execute(collection.insert(project=self.project, body=body))
        return self.GcpResource(body["name"], resp["targetLink"])

    def _patch_resource(self, collection, body, **kwargs):
        """Patch a resource and wait for the operation to complete."""
        logger.info(
            "Patching compute resource:\n%s", self.resource_pretty_format(body)
        )
        self._execute(
            collection.patch(project=self.project, body=body, **kwargs)
        )

    def _list_resource(self, collection: discovery.Resource):
        """List resources in the project; returns the raw response dict."""
        return collection.list(project=self.project).execute(
            num_retries=self._GCP_API_RETRIES
        )

    def _delete_resource(
        self,
        collection: discovery.Resource,
        resource_type: str,
        resource_name: str,
    ) -> bool:
        """Delete a resource; True on success, False on any HTTP error.

        A 404 is treated as already-deleted; other errors are logged.
        """
        try:
            params = {"project": self.project, resource_type: resource_name}
            self._execute(collection.delete(**params))
            return True
        except googleapiclient.errors.HttpError as error:
            if error.resp and error.resp.status == 404:
                logger.info(
                    'Resource %s "%s" not deleted since it does not exist',
                    resource_type,
                    resource_name,
                )
            else:
                logger.warning(
                    'Failed to delete %s "%s", %r',
                    resource_type,
                    resource_name,
                    error,
                )
        return False

    @staticmethod
    def _operation_status_done(operation):
        # Global compute operations report completion via status == "DONE".
        return "status" in operation and operation["status"] == "DONE"

    def _execute(  # pylint: disable=arguments-differ
        self, request, *, timeout_sec=_WAIT_FOR_OPERATION_SEC
    ):
        """Execute a mutating request and wait for its global operation."""
        operation = request.execute(num_retries=self._GCP_API_RETRIES)
        logger.debug("Operation %s", operation)
        return self._wait(operation["name"], timeout_sec)

    def _wait(
        self, operation_id: str, timeout_sec: int = _WAIT_FOR_OPERATION_SEC
    ) -> dict:
        """Poll a global operation until DONE; raise on reported errors."""
        logger.info(
            "Waiting %s sec for compute operation id: %s",
            timeout_sec,
            operation_id,
        )
        # TODO(sergiitk) try using wait() here
        # https://googleapis.github.io/google-api-python-client/docs/dyn/compute_v1.globalOperations.html#wait
        op_request = self.api.globalOperations().get(
            project=self.project, operation=operation_id
        )
        operation = self.wait_for_operation(
            operation_request=op_request,
            test_success_fn=self._operation_status_done,
            timeout_sec=timeout_sec,
        )
        logger.debug("Completed operation: %s", operation)
        if "error" in operation:
            # This shouldn't normally happen: gcp library raises on errors.
            raise Exception(
                f"Compute operation {operation_id} failed: {operation}"
            )
        return operation
| 18,979
| 32.953488
| 110
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/infrastructure/gcp/network_services.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import dataclasses
import logging
from typing import Any, Dict, List, Optional, Tuple
from google.rpc import code_pb2
import tenacity
from framework.infrastructure import gcp
logger = logging.getLogger(__name__)
# Type aliases
GcpResource = gcp.compute.ComputeV1.GcpResource
@dataclasses.dataclass(frozen=True)
class EndpointPolicy:
    """Immutable view of a Network Services EndpointPolicy API resource."""

    url: str
    name: str
    type: str
    traffic_port_selector: dict
    endpoint_matcher: dict
    update_time: str
    create_time: str
    http_filters: Optional[dict] = None
    server_tls_policy: Optional[str] = None

    @classmethod
    def from_response(
        cls, name: str, response: Dict[str, Any]
    ) -> "EndpointPolicy":
        """Build an EndpointPolicy from a raw API response dict.

        The short name is supplied by the caller; the response's "name"
        field holds the full resource URL.
        """
        # Optional fields default to None when absent from the response.
        filters = response.get("httpFilters")
        tls_policy = response.get("serverTlsPolicy")
        return cls(
            name=name,
            url=response["name"],
            type=response["type"],
            traffic_port_selector=response["trafficPortSelector"],
            endpoint_matcher=response["endpointMatcher"],
            update_time=response["updateTime"],
            create_time=response["createTime"],
            http_filters=filters,
            server_tls_policy=tls_policy,
        )
@dataclasses.dataclass(frozen=True)
class Mesh:
    """Immutable view of a Network Services Mesh API resource."""

    name: str
    url: str
    routes: Optional[List[str]]

    @classmethod
    def from_response(cls, name: str, d: Dict[str, Any]) -> "Mesh":
        """Build a Mesh from a raw API response dict.

        The short name is supplied by the caller; d["name"] holds the
        full resource URL. Routes stay None when the key is absent.
        """
        routes: Optional[List[str]] = None
        if "routes" in d:
            routes = list(d["routes"])
        return cls(name=name, url=d["name"], routes=routes)
@dataclasses.dataclass(frozen=True)
class GrpcRoute:
    """Immutable view of a Network Services GrpcRoute API resource."""

    @dataclasses.dataclass(frozen=True)
    class MethodMatch:
        type: Optional[str]
        grpc_service: Optional[str]
        grpc_method: Optional[str]
        case_sensitive: Optional[bool]

        @classmethod
        def from_response(cls, d: Dict[str, Any]) -> "GrpcRoute.MethodMatch":
            """Build a MethodMatch from a raw API response dict."""
            return cls(
                type=d.get("type"),
                grpc_service=d.get("grpcService"),
                grpc_method=d.get("grpcMethod"),
                case_sensitive=d.get("caseSensitive"),
            )

    @dataclasses.dataclass(frozen=True)
    class HeaderMatch:
        type: Optional[str]
        key: str
        value: str

        @classmethod
        def from_response(cls, d: Dict[str, Any]) -> "GrpcRoute.HeaderMatch":
            """Build a HeaderMatch from a raw API response dict."""
            return cls(
                type=d.get("type"),
                key=d["key"],
                value=d["value"],
            )

    @dataclasses.dataclass(frozen=True)
    class RouteMatch:
        method: Optional["GrpcRoute.MethodMatch"]
        headers: Tuple["GrpcRoute.HeaderMatch"]

        @classmethod
        def from_response(cls, d: Dict[str, Any]) -> "GrpcRoute.RouteMatch":
            """Build a RouteMatch; absent keys become None / empty tuple."""
            return cls(
                method=GrpcRoute.MethodMatch.from_response(d["method"])
                if "method" in d
                else None,
                headers=tuple(
                    GrpcRoute.HeaderMatch.from_response(h) for h in d["headers"]
                )
                if "headers" in d
                else (),
            )

    @dataclasses.dataclass(frozen=True)
    class Destination:
        service_name: str
        weight: Optional[int]

        @classmethod
        def from_response(cls, d: Dict[str, Any]) -> "GrpcRoute.Destination":
            """Build a Destination from a raw API response dict."""
            return cls(
                service_name=d["serviceName"],
                weight=d.get("weight"),
            )

    @dataclasses.dataclass(frozen=True)
    class RouteAction:
        # Fix: this field was missing, so from_response() raised
        # TypeError("unexpected keyword argument 'destinations'").
        destinations: List["GrpcRoute.Destination"]

        @classmethod
        def from_response(cls, d: Dict[str, Any]) -> "GrpcRoute.RouteAction":
            """Build a RouteAction; absent destinations become []."""
            destinations = (
                [
                    GrpcRoute.Destination.from_response(dest)
                    for dest in d["destinations"]
                ]
                if "destinations" in d
                else []
            )
            return cls(destinations=destinations)

    @dataclasses.dataclass(frozen=True)
    class RouteRule:
        matches: List["GrpcRoute.RouteMatch"]
        action: "GrpcRoute.RouteAction"

        @classmethod
        def from_response(cls, d: Dict[str, Any]) -> "GrpcRoute.RouteRule":
            """Build a RouteRule; absent matches become []."""
            matches = (
                [GrpcRoute.RouteMatch.from_response(m) for m in d["matches"]]
                if "matches" in d
                else []
            )
            return cls(
                matches=matches,
                action=GrpcRoute.RouteAction.from_response(d["action"]),
            )

    name: str
    url: str
    hostnames: Tuple[str]
    rules: Tuple["GrpcRoute.RouteRule"]
    meshes: Optional[Tuple[str]]

    @classmethod
    def from_response(cls, name: str, d: Dict[str, Any]) -> "GrpcRoute":
        # Fix: return annotation previously said "GrpcRoute.RouteRule".
        """Build a GrpcRoute from a raw API response dict.

        Note: rules are stored as the raw response dicts, not parsed
        RouteRule instances.
        """
        return cls(
            name=name,
            url=d["name"],
            hostnames=tuple(d["hostnames"]),
            rules=tuple(d["rules"]),
            meshes=None if d.get("meshes") is None else tuple(d["meshes"]),
        )
class _NetworkServicesBase(
    gcp.api.GcpStandardCloudApiResource, metaclass=abc.ABCMeta
):
    """Base class for NetworkServices APIs."""

    # TODO(https://github.com/grpc/grpc/issues/29532) remove pylint disable
    # pylint: disable=abstract-method

    def __init__(self, api_manager: gcp.api.GcpApiManager, project: str):
        super().__init__(api_manager.networkservices(self.api_version), project)
        # Shortcut to projects/*/locations/ endpoints
        self._api_locations = self.api.projects().locations()

    @property
    def api_name(self) -> str:
        return "networkservices"

    def _execute(
        self, *args, **kwargs
    ):  # pylint: disable=signature-differs,arguments-differ
        """Execute a request, retrying operations that fail as INTERNAL."""
        # Workaround TD bug: throttled operations are reported as internal.
        # Ref b/175345578
        retryer = tenacity.Retrying(
            retry=tenacity.retry_if_exception(self._operation_internal_error),
            wait=tenacity.wait_fixed(10),
            stop=tenacity.stop_after_delay(5 * 60),
            before_sleep=tenacity.before_sleep_log(logger, logging.DEBUG),
            reraise=True,
        )
        retryer(super()._execute, *args, **kwargs)

    @staticmethod
    def _operation_internal_error(exception):
        # True when a failed LRO reported google.rpc.Code.INTERNAL.
        return (
            isinstance(exception, gcp.api.OperationError)
            and exception.error.code == code_pb2.INTERNAL
        )
class NetworkServicesV1Beta1(_NetworkServicesBase):
    """NetworkServices API v1beta1."""

    # Collection name used to compose full resource names.
    ENDPOINT_POLICIES = "endpointPolicies"

    @property
    def api_version(self) -> str:
        return "v1beta1"

    def create_endpoint_policy(self, name, body: dict) -> GcpResource:
        """Create an endpoint policy; blocks until the LRO completes.

        NOTE(review): _create_resource returns None, so the GcpResource
        annotation appears aspirational — confirm before relying on it.
        """
        return self._create_resource(
            collection=self._api_locations.endpointPolicies(),
            body=body,
            endpointPolicyId=name,
        )

    def get_endpoint_policy(self, name: str) -> EndpointPolicy:
        """Fetch an endpoint policy by short name."""
        response = self._get_resource(
            collection=self._api_locations.endpointPolicies(),
            full_name=self.resource_full_name(name, self.ENDPOINT_POLICIES),
        )
        return EndpointPolicy.from_response(name, response)

    def delete_endpoint_policy(self, name: str) -> bool:
        """Delete an endpoint policy; False when it did not exist."""
        return self._delete_resource(
            collection=self._api_locations.endpointPolicies(),
            full_name=self.resource_full_name(name, self.ENDPOINT_POLICIES),
        )
class NetworkServicesV1Alpha1(NetworkServicesV1Beta1):
    """NetworkServices API v1alpha1.

    Note: extending v1beta1 class presumes that v1beta1 is just a v1alpha1 API
    graduated into a more stable version. This is true in most cases. However,
    v1alpha1 class can always override and reimplement incompatible methods.
    """

    # Collection names used to compose full resource names.
    GRPC_ROUTES = "grpcRoutes"
    MESHES = "meshes"

    @property
    def api_version(self) -> str:
        return "v1alpha1"

    def create_mesh(self, name: str, body: dict) -> GcpResource:
        """Create a mesh; blocks until the LRO completes."""
        return self._create_resource(
            collection=self._api_locations.meshes(), body=body, meshId=name
        )

    def get_mesh(self, name: str) -> Mesh:
        """Fetch a mesh by short name."""
        full_name = self.resource_full_name(name, self.MESHES)
        result = self._get_resource(
            collection=self._api_locations.meshes(), full_name=full_name
        )
        return Mesh.from_response(name, result)

    def delete_mesh(self, name: str) -> bool:
        """Delete a mesh; False when it did not exist."""
        return self._delete_resource(
            collection=self._api_locations.meshes(),
            full_name=self.resource_full_name(name, self.MESHES),
        )

    def create_grpc_route(self, name: str, body: dict) -> GcpResource:
        """Create a gRPC route; blocks until the LRO completes."""
        return self._create_resource(
            collection=self._api_locations.grpcRoutes(),
            body=body,
            grpcRouteId=name,
        )

    def get_grpc_route(self, name: str) -> GrpcRoute:
        """Fetch a gRPC route by short name."""
        full_name = self.resource_full_name(name, self.GRPC_ROUTES)
        result = self._get_resource(
            collection=self._api_locations.grpcRoutes(), full_name=full_name
        )
        return GrpcRoute.from_response(name, result)

    def delete_grpc_route(self, name: str) -> bool:
        """Delete a gRPC route; False when it did not exist."""
        return self._delete_resource(
            collection=self._api_locations.grpcRoutes(),
            full_name=self.resource_full_name(name, self.GRPC_ROUTES),
        )
| 9,827
| 30.909091
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/rpc/grpc_channelz.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This contains helpers for gRPC services defined in
https://github.com/grpc/grpc-proto/blob/master/grpc/channelz/v1/channelz.proto
"""
import ipaddress
import logging
from typing import Iterator, Optional
import grpc
from grpc_channelz.v1 import channelz_pb2
from grpc_channelz.v1 import channelz_pb2_grpc
import framework.rpc
logger = logging.getLogger(__name__)
# Type aliases
# Channel
Channel = channelz_pb2.Channel
ChannelConnectivityState = channelz_pb2.ChannelConnectivityState
ChannelState = ChannelConnectivityState.State # pylint: disable=no-member
_GetTopChannelsRequest = channelz_pb2.GetTopChannelsRequest
_GetTopChannelsResponse = channelz_pb2.GetTopChannelsResponse
# Subchannel
Subchannel = channelz_pb2.Subchannel
_GetSubchannelRequest = channelz_pb2.GetSubchannelRequest
_GetSubchannelResponse = channelz_pb2.GetSubchannelResponse
# Server
Server = channelz_pb2.Server
_GetServersRequest = channelz_pb2.GetServersRequest
_GetServersResponse = channelz_pb2.GetServersResponse
# Sockets
Socket = channelz_pb2.Socket
SocketRef = channelz_pb2.SocketRef
_GetSocketRequest = channelz_pb2.GetSocketRequest
_GetSocketResponse = channelz_pb2.GetSocketResponse
Address = channelz_pb2.Address
Security = channelz_pb2.Security
# Server Sockets
_GetServerSocketsRequest = channelz_pb2.GetServerSocketsRequest
_GetServerSocketsResponse = channelz_pb2.GetServerSocketsResponse
class ChannelzServiceClient(framework.rpc.grpc.GrpcClientHelper):
stub: channelz_pb2_grpc.ChannelzStub
    def __init__(
        self, channel: grpc.Channel, *, log_target: Optional[str] = ""
    ):
        """Create a Channelz stub over the given channel.

        log_target labels this client's RPCs in logs.
        """
        super().__init__(
            channel, channelz_pb2_grpc.ChannelzStub, log_target=log_target
        )
    @staticmethod
    def is_sock_tcpip_address(address: Address) -> bool:
        # True when the Address oneof is populated with tcpip_address.
        return address.WhichOneof("address") == "tcpip_address"
    @staticmethod
    def is_ipv4(tcpip_address: Address.TcpIpAddress) -> bool:
        # According to proto, tcpip_address.ip_address is either IPv4 or IPv6.
        # Correspondingly, it's either 4 bytes or 16 bytes in length.
        return len(tcpip_address.ip_address) == 4
    @classmethod
    def sock_address_to_str(cls, address: Address) -> str:
        """Format a channelz Address as "ip:port".

        Raises:
            NotImplementedError: for non-TCP/IP (e.g. UDS) addresses.
        """
        if cls.is_sock_tcpip_address(address):
            tcpip_address: Address.TcpIpAddress = address.tcpip_address
            if cls.is_ipv4(tcpip_address):
                ip = ipaddress.IPv4Address(tcpip_address.ip_address)
            else:
                ip = ipaddress.IPv6Address(tcpip_address.ip_address)
            return f"{ip}:{tcpip_address.port}"
        else:
            raise NotImplementedError("Only tcpip_address implemented")
    @classmethod
    def sock_addresses_pretty(cls, socket: Socket) -> str:
        """Format both endpoints of a socket as "local=..., remote=..."."""
        return (
            f"local={cls.sock_address_to_str(socket.local)}, "
            f"remote={cls.sock_address_to_str(socket.remote)}"
        )
    @staticmethod
    def find_server_socket_matching_client(
        server_sockets: Iterator[Socket], client_socket: Socket
    ) -> Optional[Socket]:
        """Return the server socket whose remote address equals the client
        socket's local address, or None when no match is found."""
        for server_socket in server_sockets:
            if server_socket.remote == client_socket.local:
                return server_socket
        return None
@staticmethod
def channel_repr(channel: Channel) -> str:
result = f"<Channel channel_id={channel.ref.channel_id}"
if channel.data.target:
result += f" target={channel.data.target}"
result += f" state={ChannelState.Name(channel.data.state.state)}>"
return result
@staticmethod
def subchannel_repr(subchannel: Subchannel) -> str:
result = f"<Subchannel subchannel_id={subchannel.ref.subchannel_id}"
if subchannel.data.target:
result += f" target={subchannel.data.target}"
result += f" state={ChannelState.Name(subchannel.data.state.state)}>"
return result
    def find_channels_for_target(
        self, target: str, **kwargs
    ) -> Iterator[Channel]:
        """Yield root channels whose data.target equals the given target."""
        return (
            channel
            for channel in self.list_channels(**kwargs)
            if channel.data.target == target
        )
def find_server_listening_on_port(
self, port: int, **kwargs
) -> Optional[Server]:
for server in self.list_servers(**kwargs):
listen_socket_ref: SocketRef
for listen_socket_ref in server.listen_socket:
listen_socket = self.get_socket(
listen_socket_ref.socket_id, **kwargs
)
listen_address: Address = listen_socket.local
if (
self.is_sock_tcpip_address(listen_address)
and listen_address.tcpip_address.port == port
):
return server
return None
    def list_channels(self, **kwargs) -> Iterator[Channel]:
        """
        Iterate over all pages of all root channels.
        Root channels are those which application has directly created.
        This does not include subchannels nor non-top level channels.

        Lazily issues one GetTopChannels RPC per page; **kwargs is forwarded
        to call_unary_with_deadline (e.g. deadline_sec).
        """
        # -1 is a sentinel: it both enters the loop before the first RPC and
        # increments to 0, the id the first page is requested from.
        start: int = -1
        response: Optional[_GetTopChannelsResponse] = None
        # response.end is set by the server on the final page.
        while start < 0 or not response.end:
            # From proto: To request subsequent pages, the client generates this
            # value by adding 1 to the highest seen result ID.
            start += 1
            response = self.call_unary_with_deadline(
                rpc="GetTopChannels",
                req=_GetTopChannelsRequest(start_channel_id=start),
                **kwargs,
            )
            for channel in response.channel:
                # Track the highest channel id seen so the next request
                # continues where this page ended.
                start = max(start, channel.ref.channel_id)
                yield channel
    def list_servers(self, **kwargs) -> Iterator[Server]:
        """Iterate over all pages of all servers that exist in the process.

        Lazily issues one GetServers RPC per page; **kwargs is forwarded to
        call_unary_with_deadline (e.g. deadline_sec).
        """
        # -1 is a sentinel: enters the loop before the first RPC, then
        # increments to 0, the id the first page is requested from.
        start: int = -1
        response: Optional[_GetServersResponse] = None
        # response.end is set by the server on the final page.
        while start < 0 or not response.end:
            # From proto: To request subsequent pages, the client generates this
            # value by adding 1 to the highest seen result ID.
            start += 1
            response = self.call_unary_with_deadline(
                rpc="GetServers",
                req=_GetServersRequest(start_server_id=start),
                **kwargs,
            )
            for server in response.server:
                # Track the highest server id seen so the next request
                # continues where this page ended.
                start = max(start, server.ref.server_id)
                yield server
    def list_server_sockets(self, server: Server, **kwargs) -> Iterator[Socket]:
        """List all server sockets that exist in server process.
        Iterating over the results will resolve additional pages automatically.

        The GetServerSockets response only carries socket *references*; each
        one is resolved into a full Socket via an extra GetSocket RPC.
        """
        # -1 is a sentinel: enters the loop before the first RPC, then
        # increments to 0, the id the first page is requested from.
        start: int = -1
        response: Optional[_GetServerSocketsResponse] = None
        # response.end is set by the server on the final page.
        while start < 0 or not response.end:
            # From proto: To request subsequent pages, the client generates this
            # value by adding 1 to the highest seen result ID.
            start += 1
            response = self.call_unary_with_deadline(
                rpc="GetServerSockets",
                req=_GetServerSocketsRequest(
                    server_id=server.ref.server_id, start_socket_id=start
                ),
                **kwargs,
            )
            socket_ref: SocketRef
            for socket_ref in response.socket_ref:
                # Track the highest socket id seen so the next request
                # continues where this page ended.
                start = max(start, socket_ref.socket_id)
                # Yield actual socket
                yield self.get_socket(socket_ref.socket_id, **kwargs)
def list_channel_sockets(
self, channel: Channel, **kwargs
) -> Iterator[Socket]:
"""List all sockets of all subchannels of a given channel."""
for subchannel in self.list_channel_subchannels(channel, **kwargs):
yield from self.list_subchannels_sockets(subchannel, **kwargs)
def list_channel_subchannels(
self, channel: Channel, **kwargs
) -> Iterator[Subchannel]:
"""List all subchannels of a given channel."""
for subchannel_ref in channel.subchannel_ref:
yield self.get_subchannel(subchannel_ref.subchannel_id, **kwargs)
def list_subchannels_sockets(
self, subchannel: Subchannel, **kwargs
) -> Iterator[Socket]:
"""List all sockets of a given subchannel."""
for socket_ref in subchannel.socket_ref:
yield self.get_socket(socket_ref.socket_id, **kwargs)
def get_subchannel(self, subchannel_id, **kwargs) -> Subchannel:
"""Return a single Subchannel, otherwise raises RpcError."""
response: _GetSubchannelResponse = self.call_unary_with_deadline(
rpc="GetSubchannel",
req=_GetSubchannelRequest(subchannel_id=subchannel_id),
**kwargs,
)
return response.subchannel
def get_socket(self, socket_id, **kwargs) -> Socket:
"""Return a single Socket, otherwise raises RpcError."""
response: _GetSocketResponse = self.call_unary_with_deadline(
rpc="GetSocket",
req=_GetSocketRequest(socket_id=socket_id),
**kwargs,
)
return response.socket
| 9,639
| 37.870968
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/rpc/grpc_testing.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This contains helpers for gRPC services defined in
https://github.com/grpc/grpc/blob/master/src/proto/grpc/testing/test.proto
"""
import logging
from typing import Iterable, Optional, Tuple
import grpc
from grpc_health.v1 import health_pb2
from grpc_health.v1 import health_pb2_grpc
import framework.rpc
from src.proto.grpc.testing import empty_pb2
from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import test_pb2_grpc
# Type aliases
_LoadBalancerStatsRequest = messages_pb2.LoadBalancerStatsRequest
LoadBalancerStatsResponse = messages_pb2.LoadBalancerStatsResponse
_LoadBalancerAccumulatedStatsRequest = (
messages_pb2.LoadBalancerAccumulatedStatsRequest
)
LoadBalancerAccumulatedStatsResponse = (
messages_pb2.LoadBalancerAccumulatedStatsResponse
)
MethodStats = messages_pb2.LoadBalancerAccumulatedStatsResponse.MethodStats
RpcsByPeer = messages_pb2.LoadBalancerStatsResponse.RpcsByPeer
class LoadBalancerStatsServiceClient(framework.rpc.grpc.GrpcClientHelper):
    """Calls the LoadBalancerStatsService of the xDS interop test client."""

    stub: test_pb2_grpc.LoadBalancerStatsServiceStub
    # Deadlines are generous: the server side blocks collecting stats over
    # many RPCs before replying.
    STATS_PARTIAL_RESULTS_TIMEOUT_SEC = 1200
    STATS_ACCUMULATED_RESULTS_TIMEOUT_SEC = 600

    def __init__(
        self, channel: grpc.Channel, *, log_target: Optional[str] = ""
    ):
        super().__init__(
            channel,
            test_pb2_grpc.LoadBalancerStatsServiceStub,
            log_target=log_target,
        )

    def get_client_stats(
        self,
        *,
        num_rpcs: int,
        timeout_sec: Optional[int] = STATS_PARTIAL_RESULTS_TIMEOUT_SEC,
    ) -> LoadBalancerStatsResponse:
        """Request per-peer stats covering the next num_rpcs RPCs.

        timeout_sec doubles as both the request's timeout field and the RPC
        deadline; None selects the class default.
        """
        if timeout_sec is None:
            timeout_sec = self.STATS_PARTIAL_RESULTS_TIMEOUT_SEC
        request = _LoadBalancerStatsRequest(
            num_rpcs=num_rpcs, timeout_sec=timeout_sec
        )
        return self.call_unary_with_deadline(
            rpc="GetClientStats",
            req=request,
            deadline_sec=timeout_sec,
            log_level=logging.INFO,
        )

    def get_client_accumulated_stats(
        self, *, timeout_sec: Optional[int] = None
    ) -> LoadBalancerAccumulatedStatsResponse:
        """Request the accumulated (since startup) stats of the test client."""
        if timeout_sec is None:
            timeout_sec = self.STATS_ACCUMULATED_RESULTS_TIMEOUT_SEC
        return self.call_unary_with_deadline(
            rpc="GetClientAccumulatedStats",
            req=_LoadBalancerAccumulatedStatsRequest(),
            deadline_sec=timeout_sec,
            log_level=logging.INFO,
        )
class XdsUpdateClientConfigureServiceClient(
    framework.rpc.grpc.GrpcClientHelper
):
    """Pushes runtime RPC configuration to the xDS interop test client."""

    stub: test_pb2_grpc.XdsUpdateClientConfigureServiceStub
    CONFIGURE_TIMEOUT_SEC: int = 5

    def __init__(
        self, channel: grpc.Channel, *, log_target: Optional[str] = ""
    ):
        super().__init__(
            channel,
            test_pb2_grpc.XdsUpdateClientConfigureServiceStub,
            log_target=log_target,
        )

    def configure(
        self,
        *,
        rpc_types: Iterable[str],
        metadata: Optional[Iterable[Tuple[str, str, str]]] = None,
        app_timeout: Optional[int] = None,
        timeout_sec: int = CONFIGURE_TIMEOUT_SEC,
    ) -> None:
        """Send a ClientConfigureRequest built from the given settings.

        Args:
            rpc_types: RpcType enum value names to enable.
            metadata: (rpc_type_name, key, value) triples to attach.
            app_timeout: Optional per-RPC timeout to set on the client.
            timeout_sec: Deadline for this Configure call itself.
        """
        rpc_type_enum = messages_pb2.ClientConfigureRequest.RpcType
        request = messages_pb2.ClientConfigureRequest()
        request.types.extend(
            rpc_type_enum.Value(type_name) for type_name in rpc_types
        )
        for rpc_type_name, md_key, md_value in metadata or []:
            request.metadata.append(
                messages_pb2.ClientConfigureRequest.Metadata(
                    type=rpc_type_enum.Value(rpc_type_name),
                    key=md_key,
                    value=md_value,
                )
            )
        if app_timeout:
            request.timeout_sec = app_timeout
        # Configure's response is empty
        self.call_unary_with_deadline(
            rpc="Configure",
            req=request,
            deadline_sec=timeout_sec,
            log_level=logging.INFO,
        )
class XdsUpdateHealthServiceClient(framework.rpc.grpc.GrpcClientHelper):
    """Flips the serving state of the xDS interop test server."""

    stub: test_pb2_grpc.XdsUpdateHealthServiceStub

    # NOTE(review): log_target is plain positional here, while the sibling
    # clients make it keyword-only; confirm callers before unifying.
    def __init__(self, channel: grpc.Channel, log_target: Optional[str] = ""):
        super().__init__(
            channel,
            test_pb2_grpc.XdsUpdateHealthServiceStub,
            log_target=log_target,
        )

    def set_serving(self):
        """Invoke the SetServing RPC (empty request/response)."""
        self.call_unary_with_deadline(
            rpc="SetServing", req=empty_pb2.Empty(), log_level=logging.INFO
        )

    def set_not_serving(self):
        """Invoke the SetNotServing RPC (empty request/response)."""
        self.call_unary_with_deadline(
            rpc="SetNotServing", req=empty_pb2.Empty(), log_level=logging.INFO
        )
class HealthClient(framework.rpc.grpc.GrpcClientHelper):
    """Calls the standard grpc.health.v1 Health service."""

    stub: health_pb2_grpc.HealthStub

    def __init__(self, channel: grpc.Channel, log_target: Optional[str] = ""):
        super().__init__(
            channel, health_pb2_grpc.HealthStub, log_target=log_target
        )

    def check_health(self):
        """Issue Health.Check with an empty request and return the response."""
        return self.call_unary_with_deadline(
            rpc="Check",
            req=health_pb2.HealthCheckRequest(),
            log_level=logging.INFO,
        )
| 5,771
| 32.172414
| 79
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/rpc/grpc.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from typing import Any, Dict, Optional
from google.protobuf import json_format
import google.protobuf.message
import grpc
logger = logging.getLogger(__name__)
# Type aliases
Message = google.protobuf.message.Message
class GrpcClientHelper:
DEFAULT_RPC_DEADLINE_SEC = 90
channel: grpc.Channel
# This is purely cosmetic to make RPC logs look like method calls.
log_service_name: str
# This is purely cosmetic to output the RPC target. Normally set to the
# hostname:port of the remote service, but it doesn't have to be the
# real target. This is done so that when RPC are routed to the proxy
# or port forwarding, this still is set to a useful name.
log_target: str
def __init__(
self,
channel: grpc.Channel,
stub_class: Any,
*,
log_target: Optional[str] = "",
):
self.channel = channel
self.stub = stub_class(channel)
self.log_service_name = re.sub(
"Stub$", "", self.stub.__class__.__name__
)
self.log_target = log_target or ""
def call_unary_with_deadline(
self,
*,
rpc: str,
req: Message,
deadline_sec: Optional[int] = DEFAULT_RPC_DEADLINE_SEC,
log_level: Optional[int] = logging.DEBUG,
) -> Message:
if deadline_sec is None:
deadline_sec = self.DEFAULT_RPC_DEADLINE_SEC
call_kwargs = dict(wait_for_ready=True, timeout=deadline_sec)
self._log_rpc_request(rpc, req, call_kwargs, log_level)
# Call RPC, e.g. RpcStub(channel).RpcMethod(req, ...options)
rpc_callable: grpc.UnaryUnaryMultiCallable = getattr(self.stub, rpc)
return rpc_callable(req, **call_kwargs)
def _log_rpc_request(self, rpc, req, call_kwargs, log_level=logging.DEBUG):
logger.log(
logging.DEBUG if log_level is None else log_level,
"[%s] >> RPC %s.%s(request=%s(%r), %s)",
self.log_target,
self.log_service_name,
rpc,
req.__class__.__name__,
json_format.MessageToDict(req),
", ".join({f"{k}={v}" for k, v in call_kwargs.items()}),
)
class GrpcApp:
    """Base for a test app reached over gRPC; caches one channel per port."""

    channels: Dict[int, grpc.Channel]

    class NotFound(Exception):
        """Requested resource not found"""

        def __init__(self, message):
            self.message = message
            super().__init__(message)

    def __init__(self, rpc_host):
        self.rpc_host = rpc_host
        # Cache gRPC channels per port
        self.channels = dict()

    def _make_channel(self, port) -> grpc.Channel:
        """Return the cached insecure channel for port, creating it once."""
        if port not in self.channels:
            target = f"{self.rpc_host}:{port}"
            self.channels[port] = grpc.insecure_channel(target)
        return self.channels[port]

    def close(self):
        # Close all channels
        for channel in self.channels.values():
            channel.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False

    def __del__(self):
        # Fixed: guard against partially-constructed instances. If __init__
        # raised before self.channels was assigned, close() would hit an
        # AttributeError during garbage collection / interpreter teardown.
        if hasattr(self, "channels"):
            self.close()
| 3,708
| 30.168067
| 79
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/rpc/__init__.py
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from framework.rpc import grpc
| 608
| 39.6
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/xds_k8s_test_driver/framework/rpc/grpc_csds.py
|
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This contains helpers for gRPC services defined in
https://github.com/envoyproxy/envoy/blob/main/api/envoy/service/status/v3/csds.proto
"""
import logging
from typing import Optional
# Envoy protos provided by PyPI package xds-protos
# Needs to import the generated Python file to load descriptors
# pylint: disable=unused-import
from envoy.extensions.filters.common.fault.v3 import fault_pb2 as _
from envoy.extensions.filters.http.fault.v3 import fault_pb2 as _
from envoy.extensions.filters.http.router.v3 import router_pb2 as _
from envoy.extensions.filters.network.http_connection_manager.v3 import (
http_connection_manager_pb2 as _,
)
# pylint: enable=unused-import
from envoy.service.status.v3 import csds_pb2
from envoy.service.status.v3 import csds_pb2_grpc
import grpc
import framework.rpc
logger = logging.getLogger(__name__)
# Type aliases
ClientConfig = csds_pb2.ClientConfig
_ClientStatusRequest = csds_pb2.ClientStatusRequest
class CsdsClient(framework.rpc.grpc.GrpcClientHelper):
    """Calls the Client Status Discovery Service (CSDS) of an xDS client."""

    stub: csds_pb2_grpc.ClientStatusDiscoveryServiceStub

    def __init__(
        self, channel: grpc.Channel, *, log_target: Optional[str] = ""
    ):
        super().__init__(
            channel,
            csds_pb2_grpc.ClientStatusDiscoveryServiceStub,
            log_target=log_target,
        )

    def fetch_client_status(self, **kwargs) -> Optional[ClientConfig]:
        """Fetches the active xDS configurations."""
        response = self.call_unary_with_deadline(
            rpc="FetchClientStatus", req=_ClientStatusRequest(), **kwargs
        )
        configs = response.config
        if len(configs) == 1:
            return configs[0]
        # Exactly one config is expected; anything else is logged and dropped.
        logger.debug("Unexpected number of client configs: %s", len(configs))
        return None
| 2,374
| 33.42029
| 84
|
py
|
grpc
|
grpc-master/tools/run_tests/performance/bq_upload_result.py
|
#!/usr/bin/env python3
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Uploads performance benchmark result file to bigquery.
from __future__ import print_function
import argparse
import calendar
import json
import os
import sys
import time
import uuid
gcp_utils_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../../gcp/utils")
)
sys.path.append(gcp_utils_dir)
import big_query_utils
_PROJECT_ID = "grpc-testing"
def _upload_netperf_latency_csv_to_bigquery(dataset_id, table_id, result_file):
    """Parse a 3-column netperf latency CSV and upload it to BigQuery.

    The file holds latency50, latency90, latency99 as comma-separated
    values; each is multiplied by 1000 before upload. Exits the process on
    upload failure.
    """
    with open(result_file, "r") as f:
        first, second, third = f.read().split(",")

    scenario_result = {
        "scenario": {"name": "netperf_tcp_rr"},
        "summary": {
            "latency50": float(first.strip()) * 1000,
            "latency90": float(second.strip()) * 1000,
            "latency99": float(third.strip()) * 1000,
        },
    }

    bq = big_query_utils.create_big_query()
    _create_results_table(bq, dataset_id, table_id)

    if not _insert_result(
        bq, dataset_id, table_id, scenario_result, flatten=False
    ):
        print("Error uploading result to bigquery.")
        sys.exit(1)
def _upload_scenario_result_to_bigquery(
    dataset_id,
    table_id,
    result_file,
    metadata_file,
    node_info_file,
    prometheus_query_results_file,
):
    """Load a scenario-result JSON file and upload it with its side files.

    Exits the process on upload failure.
    """
    with open(result_file, "r") as f:
        scenario_result = json.loads(f.read())

    bq = big_query_utils.create_big_query()
    _create_results_table(bq, dataset_id, table_id)

    uploaded = _insert_scenario_result(
        bq,
        dataset_id,
        table_id,
        scenario_result,
        metadata_file,
        node_info_file,
        prometheus_query_results_file,
    )
    if not uploaded:
        print("Error uploading result to bigquery.")
        sys.exit(1)
def _insert_result(bq, dataset_id, table_id, scenario_result, flatten=True):
    """Insert one scenario result row, optionally flattening it first.

    Returns the status from big_query_utils.insert_rows.
    """
    if flatten:
        _flatten_result_inplace(scenario_result)
    _populate_metadata_inplace(scenario_result)

    new_row = big_query_utils.make_row(str(uuid.uuid4()), scenario_result)
    return big_query_utils.insert_rows(
        bq, _PROJECT_ID, dataset_id, table_id, [new_row]
    )
def _insert_scenario_result(
    bq,
    dataset_id,
    table_id,
    scenario_result,
    test_metadata_file,
    node_info_file,
    prometheus_query_results_file,
    flatten=True,
):
    """Assemble a BigQuery row from the scenario result and side files,
    then insert it. Returns the status from big_query_utils.insert_rows."""
    if flatten:
        _flatten_result_inplace(scenario_result)
    _populate_metadata_from_file(scenario_result, test_metadata_file)
    _populate_node_metadata_from_file(scenario_result, node_info_file)
    _populate_prometheus_query_results_from_file(
        scenario_result, prometheus_query_results_file
    )

    new_row = big_query_utils.make_row(str(uuid.uuid4()), scenario_result)
    return big_query_utils.insert_rows(
        bq, _PROJECT_ID, dataset_id, table_id, [new_row]
    )
def _create_results_table(bq, dataset_id, table_id):
    """Create the BigQuery results table from the bundled JSON schema.

    Returns the status from big_query_utils.create_table2.
    """
    # Fixed: use os.path.join instead of string concatenation. With a
    # relative __file__ (script run from its own directory),
    # os.path.dirname(__file__) is "" and the old
    # '"" + "/scenario_result_schema.json"' resolved the schema at the
    # filesystem root.
    schema_file = os.path.join(
        os.path.dirname(__file__), "scenario_result_schema.json"
    )
    with open(schema_file, "r") as f:
        table_schema = json.loads(f.read())
    desc = "Results of performance benchmarks."
    return big_query_utils.create_table2(
        bq, _PROJECT_ID, dataset_id, table_id, table_schema, desc
    )
def _flatten_result_inplace(scenario_result):
    """Bigquery is not really great for handling deeply nested data
    and repeated fields. To maintain values of some fields while keeping
    the schema relatively simple, we artificially leave some of the fields
    as JSON strings.

    Mutates scenario_result in place; assumes the "scenario", "latencies",
    "serverStats", "clientStats", "serverCores", "clientSuccess",
    "serverSuccess" and "summary" keys are present.
    """
    # Configs are arbitrary nested objects: store them as JSON strings.
    scenario_result["scenario"]["clientConfig"] = json.dumps(
        scenario_result["scenario"]["clientConfig"]
    )
    scenario_result["scenario"]["serverConfig"] = json.dumps(
        scenario_result["scenario"]["serverConfig"]
    )
    scenario_result["latencies"] = json.dumps(scenario_result["latencies"])
    # Extract just the CPU-time fields from each server stats entry into a
    # parallel serverCpuStats list; pop() removes them from serverStats.
    scenario_result["serverCpuStats"] = []
    for stats in scenario_result["serverStats"]:
        scenario_result["serverCpuStats"].append(dict())
        scenario_result["serverCpuStats"][-1]["totalCpuTime"] = stats.pop(
            "totalCpuTime", None
        )
        scenario_result["serverCpuStats"][-1]["idleCpuTime"] = stats.pop(
            "idleCpuTime", None
        )
    # Per-client latencies become JSON strings; requestResults are dropped
    # from the per-client entries (a top-level copy is kept below).
    for stats in scenario_result["clientStats"]:
        stats["latencies"] = json.dumps(stats["latencies"])
        stats.pop("requestResults", None)
    scenario_result["serverCores"] = json.dumps(scenario_result["serverCores"])
    scenario_result["clientSuccess"] = json.dumps(
        scenario_result["clientSuccess"]
    )
    scenario_result["serverSuccess"] = json.dumps(
        scenario_result["serverSuccess"]
    )
    # requestResults is optional in the input; default to an empty list.
    scenario_result["requestResults"] = json.dumps(
        scenario_result.get("requestResults", [])
    )
    # Promote serverCpuUsage out of the summary to a top-level column.
    scenario_result["serverCpuUsage"] = scenario_result["summary"].pop(
        "serverCpuUsage", None
    )
    # These summary fields are intentionally not uploaded.
    scenario_result["summary"].pop("successfulRequestsPerSecond", None)
    scenario_result["summary"].pop("failedRequestsPerSecond", None)
def _populate_metadata_inplace(scenario_result):
"""Populates metadata based on environment variables set by Jenkins."""
# NOTE: Grabbing the Kokoro environment variables will only work if the
# driver is running locally on the same machine where Kokoro has started
# the job. For our setup, this is currently the case, so just assume that.
build_number = os.getenv("KOKORO_BUILD_NUMBER")
build_url = (
"https://source.cloud.google.com/results/invocations/%s"
% os.getenv("KOKORO_BUILD_ID")
)
job_name = os.getenv("KOKORO_JOB_NAME")
git_commit = os.getenv("KOKORO_GIT_COMMIT")
# actual commit is the actual head of PR that is getting tested
# TODO(jtattermusch): unclear how to obtain on Kokoro
git_actual_commit = os.getenv("ghprbActualCommit")
utc_timestamp = str(calendar.timegm(time.gmtime()))
metadata = {"created": utc_timestamp}
if build_number:
metadata["buildNumber"] = build_number
if build_url:
metadata["buildUrl"] = build_url
if job_name:
metadata["jobName"] = job_name
if git_commit:
metadata["gitCommit"] = git_commit
if git_actual_commit:
metadata["gitActualCommit"] = git_actual_commit
scenario_result["metadata"] = metadata
def _populate_metadata_from_file(scenario_result, test_metadata_file):
utc_timestamp = str(calendar.timegm(time.gmtime()))
metadata = {"created": utc_timestamp}
_annotation_to_bq_metadata_key_map = {
"ci_" + key: key
for key in (
"buildNumber",
"buildUrl",
"jobName",
"gitCommit",
"gitActualCommit",
)
}
if os.access(test_metadata_file, os.R_OK):
with open(test_metadata_file, "r") as f:
test_metadata = json.loads(f.read())
# eliminate managedFields from metadata set
if "managedFields" in test_metadata:
del test_metadata["managedFields"]
annotations = test_metadata.get("annotations", {})
# if use kubectl apply ..., kubectl will append current configuration to
# annotation, the field is deleted since it includes a lot of irrelevant
# information
if "kubectl.kubernetes.io/last-applied-configuration" in annotations:
del annotations["kubectl.kubernetes.io/last-applied-configuration"]
# dump all metadata as JSON to testMetadata field
scenario_result["testMetadata"] = json.dumps(test_metadata)
for key, value in _annotation_to_bq_metadata_key_map.items():
if key in annotations:
metadata[value] = annotations[key]
scenario_result["metadata"] = metadata
def _populate_node_metadata_from_file(scenario_result, node_info_file):
node_metadata = {"driver": {}, "servers": [], "clients": []}
_node_info_to_bq_node_metadata_key_map = {
"Name": "name",
"PodIP": "podIP",
"NodeName": "nodeName",
}
if os.access(node_info_file, os.R_OK):
with open(node_info_file, "r") as f:
file_metadata = json.loads(f.read())
for key, value in _node_info_to_bq_node_metadata_key_map.items():
node_metadata["driver"][value] = file_metadata["Driver"][key]
for clientNodeInfo in file_metadata["Clients"]:
node_metadata["clients"].append(
{
value: clientNodeInfo[key]
for key, value in _node_info_to_bq_node_metadata_key_map.items()
}
)
for serverNodeInfo in file_metadata["Servers"]:
node_metadata["servers"].append(
{
value: serverNodeInfo[key]
for key, value in _node_info_to_bq_node_metadata_key_map.items()
}
)
scenario_result["nodeMetadata"] = node_metadata
def _populate_prometheus_query_results_from_file(
scenario_result, prometheus_query_result_file
):
"""Populate the results from Prometheus query to Bigquery table"""
if os.access(prometheus_query_result_file, os.R_OK):
with open(prometheus_query_result_file, "r", encoding="utf8") as f:
file_query_results = json.loads(f.read())
scenario_result["testDurationSeconds"] = file_query_results[
"testDurationSeconds"
]
clientsPrometheusData = []
if "clients" in file_query_results:
for client_name, client_data in file_query_results[
"clients"
].items():
clientPrometheusData = {"name": client_name}
containersPrometheusData = []
for container_name, container_data in client_data.items():
containerPrometheusData = {
"name": container_name,
"cpuSeconds": container_data["cpuSeconds"],
"memoryMean": container_data["memoryMean"],
}
containersPrometheusData.append(containerPrometheusData)
clientPrometheusData[
"containers"
] = containersPrometheusData
clientsPrometheusData.append(clientPrometheusData)
scenario_result["clientsPrometheusData"] = clientsPrometheusData
serversPrometheusData = []
if "servers" in file_query_results:
for server_name, server_data in file_query_results[
"servers"
].items():
serverPrometheusData = {"name": server_name}
containersPrometheusData = []
for container_name, container_data in server_data.items():
containerPrometheusData = {
"name": container_name,
"cpuSeconds": container_data["cpuSeconds"],
"memoryMean": container_data["memoryMean"],
}
containersPrometheusData.append(containerPrometheusData)
serverPrometheusData[
"containers"
] = containersPrometheusData
serversPrometheusData.append(serverPrometheusData)
scenario_result["serversPrometheusData"] = serversPrometheusData
# Command-line entry point: parse arguments, pick the upload format, upload.
argp = argparse.ArgumentParser(description="Upload result to big query.")
argp.add_argument(
    "--bq_result_table",
    required=True,
    default=None,
    type=str,
    help='Bigquery "dataset.table" to upload results to.',
)
argp.add_argument(
    "--file_to_upload",
    default="scenario_result.json",
    type=str,
    help="Report file to upload.",
)
argp.add_argument(
    "--metadata_file_to_upload",
    default="metadata.json",
    type=str,
    help="Metadata file to upload.",
)
argp.add_argument(
    "--node_info_file_to_upload",
    default="node_info.json",
    type=str,
    help="Node information file to upload.",
)
argp.add_argument(
    "--prometheus_query_results_to_upload",
    default="prometheus_query_result.json",
    type=str,
    help="Prometheus query result file to upload.",
)
argp.add_argument(
    "--file_format",
    choices=["scenario_result", "netperf_latency_csv"],
    default="scenario_result",
    help="Format of the file to upload.",
)
args = argp.parse_args()

# Fixed: maxsplit=1 so "dataset.table" splits into exactly two parts. The
# old maxsplit=2 allowed three parts, which raised ValueError on unpacking
# whenever the argument contained two dots.
dataset_id, table_id = args.bq_result_table.split(".", 1)

if args.file_format == "netperf_latency_csv":
    _upload_netperf_latency_csv_to_bigquery(
        dataset_id, table_id, args.file_to_upload
    )
else:
    _upload_scenario_result_to_bigquery(
        dataset_id,
        table_id,
        args.file_to_upload,
        args.metadata_file_to_upload,
        args.node_info_file_to_upload,
        args.prometheus_query_results_to_upload,
    )
print(
    "Successfully uploaded %s, %s, %s and %s to BigQuery.\n"
    % (
        args.file_to_upload,
        args.metadata_file_to_upload,
        args.node_info_file_to_upload,
        args.prometheus_query_results_to_upload,
    )
)
| 13,765
| 33.675063
| 84
|
py
|
grpc
|
grpc-master/tools/run_tests/performance/loadtest_template.py
|
#!/usr/bin/env python3
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates a load test configuration template from a collection of
# load test configurations.
#
# Configuration templates contain client and server configurations for multiple
# languages, and may contain template substitution keys. These templates are
# used to generate load test configurations by selecting clients and servers for
# the required languages. The source files for template generation may be load
# test configurations or load test configuration templates. Load test
# configuration generation is performed by loadtest_config.py. See documentation
# below:
# https://github.com/grpc/grpc/blob/master/tools/run_tests/performance/README.md
import argparse
import os
import sys
from typing import Any, Dict, Iterable, List, Mapping, Type
import yaml
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import loadtest_config
TEMPLATE_FILE_HEADER_COMMENT = """
# Template generated from load test configurations by loadtest_template.py.
#
# Configuration templates contain client and server configurations for multiple
# languages, and may contain template substitution keys. These templates are
# used to generate load test configurations by selecting clients and servers for
# the required languages. The source files for template generation may be load
# test configurations or load test configuration templates. Load test
# configuration generation is performed by loadtest_config.py. See documentation
# below:
# https://github.com/grpc/grpc/blob/master/tools/run_tests/performance/README.md
"""
def insert_worker(
    worker: Dict[str, Any], workers: List[Dict[str, Any]]
) -> None:
    """Inserts client or server into a list, without inserting duplicates."""

    def canonical(w):
        # Serialize to YAML so structurally-equal workers compare equal.
        return yaml.dump(w, Dumper=yaml.SafeDumper, default_flow_style=False)

    candidate = canonical(worker)
    if all(canonical(existing) != candidate for existing in workers):
        workers.append(worker)
def uniquify_workers(workermap: Dict[str, List[Dict[str, Any]]]) -> None:
    """Name workers if there is more than one for the same map key."""
    for worker_list in workermap.values():
        if len(worker_list) > 1:
            # Assign positional names "0", "1", ... to disambiguate.
            for index, worker in enumerate(worker_list):
                worker["name"] = str(index)
def loadtest_template(
    input_file_names: Iterable[str],
    metadata: Mapping[str, Any],
    inject_client_pool: bool,
    inject_driver_image: bool,
    inject_driver_pool: bool,
    inject_server_pool: bool,
    inject_big_query_table: bool,
    inject_timeout_seconds: bool,
    inject_ttl_seconds: bool) -> Dict[str, Any]:  # fmt: skip
    """Generates the load test template.

    Merges the specs of all input LoadTest configs into one template dict,
    deduplicating clients/servers per language (see insert_worker) and
    optionally replacing selected fields with ${...} substitution keys.

    Raises:
        ValueError: on an unexpected apiVersion/kind in an input file, or
            when inject_driver_pool is set but no driver.run exists.
    """
    spec = dict()  # type: Dict[str, Any]
    # Per-language accumulators of unique client/server configurations.
    clientmap = dict()  # Dict[str, List[Dict[str, Any]]]
    servermap = dict()  # Dict[str, List[Dict[str, Any]]]
    template = {
        "apiVersion": "e2etest.grpc.io/v1",
        "kind": "LoadTest",
        "metadata": metadata,
    }
    for input_file_name in input_file_names:
        with open(input_file_name) as f:
            input_config = yaml.safe_load(f.read())
        # Every input must be a LoadTest of the same API version.
        if input_config.get("apiVersion") != template["apiVersion"]:
            raise ValueError(
                "Unexpected api version in file {}: {}".format(
                    input_file_name, input_config.get("apiVersion")
                )
            )
        if input_config.get("kind") != template["kind"]:
            raise ValueError(
                "Unexpected kind in file {}: {}".format(
                    input_file_name, input_config.get("kind")
                )
            )
        # Collect clients/servers by language, dropping per-config names;
        # uniquify_workers() re-names duplicates after the loop.
        for client in input_config["spec"]["clients"]:
            del client["name"]
            if inject_client_pool:
                client["pool"] = "${client_pool}"
            if client["language"] not in clientmap:
                clientmap[client["language"]] = []
            insert_worker(client, clientmap[client["language"]])
        for server in input_config["spec"]["servers"]:
            del server["name"]
            if inject_server_pool:
                server["pool"] = "${server_pool}"
            if server["language"] not in servermap:
                servermap[server["language"]] = []
            insert_worker(server, servermap[server["language"]])
        # Fold the remaining spec fields into the merged spec; later input
        # files override earlier ones. scenariosJSON is intentionally not
        # carried over into the template.
        input_spec = input_config["spec"]
        del input_spec["clients"]
        del input_spec["servers"]
        del input_spec["scenariosJSON"]
        spec.update(input_config["spec"])
    uniquify_workers(clientmap)
    uniquify_workers(servermap)
    # Flatten the per-language maps into lists, ordered by language name.
    spec.update(
        {
            "clients": sum(
                (clientmap[language] for language in sorted(clientmap)),
                start=[],
            ),
            "servers": sum(
                (servermap[language] for language in sorted(servermap)),
                start=[],
            ),
        }
    )
    # Default driver when none of the inputs specified one.
    if "driver" not in spec:
        spec["driver"] = {"language": "cxx"}
    driver = spec["driver"]
    if "name" in driver:
        del driver["name"]
    if inject_driver_image:
        # Image substitution goes on the first run entry, creating it if
        # needed.
        if "run" not in driver:
            driver["run"] = [{"name": "main"}]
        driver["run"][0]["image"] = "${driver_image}"
    if inject_driver_pool:
        driver["pool"] = "${driver_pool}"
    # A driver without a run section is dropped entirely — unless a pool
    # was requested for it, which is then impossible to honor.
    if "run" not in driver:
        if inject_driver_pool:
            raise ValueError("Cannot inject driver.pool: missing driver.run.")
        del spec["driver"]
    if inject_big_query_table:
        if "results" not in spec:
            spec["results"] = dict()
        spec["results"]["bigQueryTable"] = "${big_query_table}"
    if inject_timeout_seconds:
        spec["timeoutSeconds"] = "${timeout_seconds}"
    if inject_ttl_seconds:
        spec["ttlSeconds"] = "${ttl_seconds}"
    template["spec"] = spec
    return template
def template_dumper(header_comment: str) -> Type[yaml.SafeDumper]:
    """Returns a custom dumper to dump templates in the expected format.

    The returned class writes header_comment at the start of the YAML
    stream and renders multi-line strings in literal block ("|") style.
    """

    class TemplateDumper(yaml.SafeDumper):
        def expect_stream_start(self):
            super().expect_stream_start()
            # Emit the header right after the stream starts, before any
            # document content.
            if isinstance(self.event, yaml.StreamStartEvent):
                self.write_indent()
                self.write_indicator(header_comment, need_whitespace=False)

    def str_presenter(dumper, data):
        # Multi-line strings use literal block style for readability.
        if "\n" in data:
            return dumper.represent_scalar(
                "tag:yaml.org,2002:str", data, style="|"
            )
        return dumper.represent_scalar("tag:yaml.org,2002:str", data)

    TemplateDumper.add_representer(str, str_presenter)
    return TemplateDumper
def main() -> None:
    """Parses flags, merges input configs, and dumps a template as YAML.

    Reads one or more load test configurations, merges their scenarios,
    clients, and servers into a single template (optionally injecting
    ${...} substitution variables), and writes the result to the output
    file or stdout.
    """
    argp = argparse.ArgumentParser(
        description="Creates a load test config generator template.",
        fromfile_prefix_chars="@",
    )
    argp.add_argument(
        "-i",
        "--inputs",
        action="extend",
        nargs="+",
        type=str,
        help="Input files.",
    )
    argp.add_argument(
        "-o",
        "--output",
        type=str,
        help="Output file. Outputs to stdout if not set.",
    )
    argp.add_argument(
        "--inject_client_pool",
        action="store_true",
        help="Set spec.client(s).pool values to '${client_pool}'.",
    )
    argp.add_argument(
        "--inject_driver_image",
        action="store_true",
        help="Set spec.driver(s).image values to '${driver_image}'.",
    )
    argp.add_argument(
        "--inject_driver_pool",
        action="store_true",
        help="Set spec.driver(s).pool values to '${driver_pool}'.",
    )
    argp.add_argument(
        "--inject_server_pool",
        action="store_true",
        help="Set spec.server(s).pool values to '${server_pool}'.",
    )
    argp.add_argument(
        "--inject_big_query_table",
        action="store_true",
        help="Set spec.results.bigQueryTable to '${big_query_table}'.",
    )
    argp.add_argument(
        "--inject_timeout_seconds",
        action="store_true",
        help="Set spec.timeoutSeconds to '${timeout_seconds}'.",
    )
    # NOTE(review): help text "Set timeout " looks truncated/copy-pasted;
    # presumably should read "Set spec.ttlSeconds to '${ttl_seconds}'." — confirm.
    argp.add_argument(
        "--inject_ttl_seconds", action="store_true", help="Set timeout "
    )
    argp.add_argument(
        "-n", "--name", default="", type=str, help="metadata.name."
    )
    argp.add_argument(
        "-a",
        "--annotation",
        action="append",
        type=str,
        help="metadata.annotation(s), in the form key=value.",
        dest="annotations",
    )
    args = argp.parse_args()
    annotations = loadtest_config.parse_key_value_args(args.annotations)
    metadata = {"name": args.name}
    if annotations:
        metadata["annotations"] = annotations
    template = loadtest_template(
        input_file_names=args.inputs,
        metadata=metadata,
        inject_client_pool=args.inject_client_pool,
        inject_driver_image=args.inject_driver_image,
        inject_driver_pool=args.inject_driver_pool,
        inject_server_pool=args.inject_server_pool,
        inject_big_query_table=args.inject_big_query_table,
        inject_timeout_seconds=args.inject_timeout_seconds,
        inject_ttl_seconds=args.inject_ttl_seconds,
    )
    # NOTE(review): entering sys.stdout as a context manager closes stdout
    # on exit; harmless here because it is the last operation — confirm if reused.
    with open(args.output, "w") if args.output else sys.stdout as f:
        yaml.dump(
            template,
            stream=f,
            Dumper=template_dumper(TEMPLATE_FILE_HEADER_COMMENT.strip()),
            default_flow_style=False,
        )
# Script entry point: build and emit the load test template.
if __name__ == "__main__":
    main()
| 10,110
| 33.043771
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/performance/patch_scenario_results_schema.py
|
#!/usr/bin/env python3
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Use to patch schema of existing scenario results tables (after adding fields).
from __future__ import print_function
import argparse
import calendar
import json
import os
import sys
import time
import uuid
gcp_utils_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../../gcp/utils")
)
sys.path.append(gcp_utils_dir)
import big_query_utils
_PROJECT_ID = "grpc-testing"
def _patch_results_table(dataset_id, table_id):
    """Patches the schema of an existing scenario results table.

    Reads the schema from scenario_result_schema.json (next to this
    script) and applies it to the given BigQuery table.

    Args:
        dataset_id: BigQuery dataset containing the table.
        table_id: Name of the table to patch.

    Returns:
        The result of big_query_utils.patch_table.
    """
    bq = big_query_utils.create_big_query()
    # os.path.join is safer than string concatenation for paths.
    schema_path = os.path.join(
        os.path.dirname(__file__), "scenario_result_schema.json"
    )
    with open(schema_path, "r") as f:
        # json.load reads directly from the file object.
        table_schema = json.load(f)
    return big_query_utils.patch_table(
        bq, _PROJECT_ID, dataset_id, table_id, table_schema
    )
# Command-line entry point: parse flags and patch the table schema.
argp = argparse.ArgumentParser(
    description="Patch schema of scenario results table."
)
argp.add_argument(
    "--bq_result_table",
    required=True,
    default=None,
    type=str,
    help='Bigquery "dataset.table" to patch.',
)
args = argp.parse_args()
# Split on the first '.' only: the previous maxsplit of 2 produced three
# values for a table name containing two dots and raised ValueError on
# unpacking into (dataset_id, table_id).
dataset_id, table_id = args.bq_result_table.split(".", 1)
_patch_results_table(dataset_id, table_id)
print("Successfully patched schema of %s.\n" % args.bq_result_table)
| 1,821
| 26.606061
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/performance/prometheus.py
|
#!/usr/bin/env python3
# Copyright 2022 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A script to fetch total cpu seconds and memory data from prometheus.
# example usage: python3 prometheus.py
# --url=http://prometheus.prometheus.svc.cluster.local:9090
# --pod_type=driver --pod_type=clients --container_name=main
# --container_name=sidecar
"""Perform Prometheus range queries to obtain cpu and memory data.
This module performs range queries through Prometheus API to obtain
total cpu seconds and memory during a test run for given container
in given pods. The CPU data obtained is the total CPU seconds used
within the given period of time. The memory data is the instantaneous
memory usage at the query time.
"""
import argparse
import json
import logging
import statistics
import time
from typing import Any, Dict, List
from dateutil import parser
import requests
class Prometheus:
    """Holds the query URL plus the start and end of the query time range."""

    def __init__(
        self,
        url: str,
        start: str,
        end: str,
    ):
        # Base URL of the Prometheus server, e.g. http://host:9090.
        self.url = url
        # Query range boundaries, as epoch-second strings.
        self.start = start
        self.end = end

    def _fetch_by_query(self, query: str) -> Dict[str, Any]:
        """Fetches the given query with time range.

        Fetch the given query within a time range. The pulling
        interval is every 5s, the actual data from the query is
        a time series.

        Raises:
            requests.HTTPError: If the Prometheus API returns an error status.
        """
        resp = requests.get(
            self.url + "/api/v1/query_range",
            {"query": query, "start": self.start, "end": self.end, "step": 5},
        )
        resp.raise_for_status()
        return resp.json()

    def _fetch_metric_for_pod(
        self, metric: str, label: str, container_matcher: str, pod_name: str
    ) -> Dict[str, List[float]]:
        """Fetches one cadvisor metric for the matching containers of a pod.

        Shared implementation for the cpu and memory fetches: runs a range
        query restricted to the given pod and containers, and trims the
        time series into plain float lists keyed by container name.

        Args:
            metric: Prometheus metric name to query.
            label: Short name ("cpu"/"memory") used only in debug logging.
            container_matcher: A string consist one or more container name
                separated by |.
            pod_name: Name of the pod to restrict the query to.
        """
        query = (
            metric
            + '{job="kubernetes-cadvisor",pod="'
            + pod_name
            + '",container='
            + container_matcher
            + "}"
        )
        logging.debug("running prometheus query for %s: %s", label, query)
        raw_data = self._fetch_by_query(query)
        logging.debug("raw %s data: %s", label, str(raw_data))
        return get_data_list_from_timeseries(raw_data)

    def _fetch_cpu_for_pod(
        self, container_matcher: str, pod_name: str
    ) -> Dict[str, List[float]]:
        """Fetches the cpu data for each pod.

        Fetch total cpu seconds during the time range specified in the
        Prometheus instance for a pod, trimmed to a per-container data list.

        Args:
            container_matcher: A string consist one or more container name
                separated by |.
        """
        return self._fetch_metric_for_pod(
            "container_cpu_usage_seconds_total",
            "cpu",
            container_matcher,
            pod_name,
        )

    def _fetch_memory_for_pod(
        self, container_matcher: str, pod_name: str
    ) -> Dict[str, List[float]]:
        """Fetches memory data for each pod.

        Fetch memory usage during the time range specified in the Prometheus
        instance for a pod, trimmed to a per-container data list.

        Args:
            container_matcher: A string consist one or more container name
                separated by |.
        """
        return self._fetch_metric_for_pod(
            "container_memory_usage_bytes",
            "memory",
            container_matcher,
            pod_name,
        )

    def fetch_cpu_and_memory_data(
        self, container_list: List[str], pod_dict: Dict[str, List[str]]
    ) -> Dict[str, Any]:
        """Fetch total cpu seconds and memory data for multiple pods.

        Args:
            container_list: A list of container names to fetch the data for.
            pod_dict: the pods to fetch data for, the pod_dict is keyed by
                role of the pod: clients, driver and servers. The values
                for the pod_dict are the list of pod names that consist
                the same role specified in the key.
        """
        container_matcher = construct_container_matcher(container_list)
        processed_data = {}
        for role, pod_names in pod_dict.items():
            pod_data = {}
            for pod in pod_names:
                container_data = {}
                for container, data in self._fetch_cpu_for_pod(
                    container_matcher, pod
                ).items():
                    container_data.setdefault(container, {})[
                        "cpuSeconds"
                    ] = compute_total_cpu_seconds(data)
                for container, data in self._fetch_memory_for_pod(
                    container_matcher, pod
                ).items():
                    # setdefault guards against a KeyError when a container
                    # appears in the memory results but not the cpu results.
                    container_data.setdefault(container, {})[
                        "memoryMean"
                    ] = compute_average_memory_usage(data)
                pod_data[pod] = container_data
            processed_data[role] = pod_data
        return processed_data
def construct_container_matcher(container_list: List[str]) -> str:
    """Constructs the container matching string used in the prometheus query.

    Returns an exact-match label value ('"name"') for a single container
    and a regex-match value ('~"a|b"') for multiple containers; the caller
    splices the result after 'container=' in the PromQL selector.

    Raises:
        Exception: If no container name is provided.
    """
    if len(container_list) == 0:
        raise Exception("no container name provided")
    if len(container_list) == 1:
        # Quote the value: an unquoted label value (container=main) is not
        # valid PromQL. The previous code returned the bare name.
        return '"' + container_list[0] + '"'
    return '~"' + "|".join(container_list) + '"'
def get_data_list_from_timeseries(data: Any) -> Dict[str, List[float]]:
    """Constructs a Dict as keys are the container names and
    values are a list of data taken from given timeseries data."""
    if data["status"] != "success":
        raise Exception("command failed: " + data["status"] + str(data))
    if data["data"]["resultType"] != "matrix":
        raise Exception(
            "resultType is not matrix: " + data["data"]["resultType"]
        )
    # Each result holds one container's time series; keep only the sample
    # values (index 1 of each [timestamp, value] pair), as floats.
    return {
        result["metric"]["container"]: [
            float(point[1]) for point in result["values"]
        ]
        for result in data["data"]["result"]
    }
def compute_total_cpu_seconds(cpu_data_list: List[float]) -> float:
    """Computes the total cpu seconds by CPUs[end]-CPUs[start]."""
    # The counter is cumulative, so usage over the window is the
    # difference between the last and first samples.
    return cpu_data_list[-1] - cpu_data_list[0]
def compute_average_memory_usage(memory_data_list: List[float]) -> float:
    """Computes the mean of a given list of memory usage samples."""
    return statistics.mean(memory_data_list)
def construct_pod_dict(
    node_info_file: str, pod_types: List[str]
) -> Dict[str, List[str]]:
    """Constructs a dict of pod names to be queried.

    Args:
        node_info_file: The file path contains the pod names to query.
            The pods' names are put into a Dict of list that keyed by the
            role name: clients, servers and driver.
        pod_types: Roles to include in the returned dict.
    """
    with open(node_info_file, "r") as f:
        node_info = json.load(f)
    # Collect every pod name by role, then keep only the requested roles.
    role_to_names = {
        "clients": [client["Name"] for client in node_info["Clients"]],
        "servers": [server["Name"] for server in node_info["Servers"]],
        "driver": [node_info["Driver"]["Name"]],
    }
    return {pod_type: role_to_names[pod_type] for pod_type in pod_types}
def convert_UTC_to_epoch(utc_timestamp: str) -> str:
    """Converts a utc timestamp string to epoch time string.

    Args:
        utc_timestamp: Timestamp string, typically ISO 8601 with an
            explicit UTC offset (as produced by the benchmark summary).

    Returns:
        Whole seconds since the Unix epoch, as a string.
    """
    parsed_time = parser.parse(utc_timestamp)
    # datetime.timestamp() honors the tzinfo attached by the parser.
    # The previous strftime("%s") relied on a non-portable glibc extension
    # that formats in local time and ignores tzinfo, yielding a wrong
    # epoch on hosts whose local timezone is not UTC.
    return str(int(parsed_time.timestamp()))
def main() -> None:
    """Parses flags, queries Prometheus, and exports cpu/memory stats.

    Reads the test window from the scenario result file, fetches cpu
    seconds and memory usage for the selected pods and containers, and
    writes the processed data to a JSON file.
    """
    argp = argparse.ArgumentParser(
        description="Fetch cpu and memory stats from prometheus"
    )
    argp.add_argument("--url", help="Prometheus base url", required=True)
    argp.add_argument(
        "--scenario_result_file",
        default="scenario_result.json",
        type=str,
        help="File contains epoch seconds for start and end time",
    )
    argp.add_argument(
        "--node_info_file",
        default="/var/data/qps_workers/node_info.json",
        help="File contains pod name to query the metrics for",
    )
    argp.add_argument(
        "--pod_type",
        action="append",
        help=(
            "Pod type to query the metrics for, the options are driver, client"
            " and server"
        ),
        choices=["driver", "clients", "servers"],
        required=True,
    )
    argp.add_argument(
        "--container_name",
        action="append",
        help="The container names to query the metrics for",
        required=True,
    )
    argp.add_argument(
        "--export_file_name",
        default="prometheus_query_result.json",
        type=str,
        help="Name of exported JSON file.",
    )
    # NOTE(review): --quiet has no type/action, so any provided value is a
    # truthy string; only the default (False) enables debug logging — confirm.
    argp.add_argument(
        "--quiet",
        default=False,
        help="Suppress informative output",
    )
    argp.add_argument(
        "--delay_seconds",
        default=0,
        type=int,
        help=(
            "Configure delay in seconds to perform Prometheus queries, default"
            " is 0"
        ),
    )
    args = argp.parse_args()
    if not args.quiet:
        logging.getLogger().setLevel(logging.DEBUG)
    with open(args.scenario_result_file, "r") as q:
        scenario_result = json.load(q)
        # The query window comes from the benchmark's own summary section.
        start_time = convert_UTC_to_epoch(
            scenario_result["summary"]["startTime"]
        )
        end_time = convert_UTC_to_epoch(scenario_result["summary"]["endTime"])
        p = Prometheus(
            url=args.url,
            start=start_time,
            end=end_time,
        )
    # Optional delay lets Prometheus finish scraping the final samples.
    time.sleep(args.delay_seconds)
    pod_dict = construct_pod_dict(args.node_info_file, args.pod_type)
    processed_data = p.fetch_cpu_and_memory_data(
        container_list=args.container_name, pod_dict=pod_dict
    )
    processed_data["testDurationSeconds"] = float(end_time) - float(start_time)
    logging.debug(json.dumps(processed_data, sort_keys=True, indent=4))
    with open(args.export_file_name, "w", encoding="utf8") as export_file:
        json.dump(processed_data, export_file, sort_keys=True, indent=4)
# Script entry point: query Prometheus and export the results.
if __name__ == "__main__":
    main()
| 11,504
| 33.758308
| 91
|
py
|
grpc
|
grpc-master/tools/run_tests/performance/loadtest_concat_yaml.py
|
#!/usr/bin/env python3
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Helper script to concatenate YAML files.
#
# This script concatenates multiple YAML files into a single multipart file.
# Input files are not parsed but processed as strings. This is a convenience
# script to concatenate output files generated by multiple runs of
# loadtest_config.py.
import argparse
import sys
from typing import Iterable
def gen_content_strings(input_files: Iterable[str]) -> Iterable[str]:
    """Yields file contents interleaved with YAML document separators.

    Generalized to accept any iterable (not just a sequence), matching the
    declared parameter type: the previous implementation indexed and
    sliced input_files. A "---\\n" separator is yielded before every file
    after the first.

    Args:
        input_files: Paths of the files to concatenate (may be empty/None).
    """
    if not input_files:
        return
    first = True
    for input_file in input_files:
        if not first:
            yield "---\n"
        first = False
        with open(input_file) as f:
            yield f.read()
def main() -> None:
    """Parses arguments and writes the concatenated YAML documents."""
    argp = argparse.ArgumentParser(description="Concatenates YAML files.")
    argp.add_argument(
        "-i",
        "--inputs",
        action="extend",
        nargs="+",
        type=str,
        required=True,
        help="Input files.",
    )
    argp.add_argument(
        "-o",
        "--output",
        type=str,
        help="Concatenated output file. Output to stdout if not set.",
    )
    args = argp.parse_args()
    # Write to the named file when given, otherwise to stdout.
    output_stream = open(args.output, "w") if args.output else sys.stdout
    with output_stream as f:
        for chunk in gen_content_strings(args.inputs):
            print(chunk, file=f, sep="", end="")
# Script entry point: concatenate the input YAML files.
if __name__ == "__main__":
    main()
| 1,965
| 27.492754
| 76
|
py
|
grpc
|
grpc-master/tools/run_tests/performance/scenario_config_exporter.py
|
#!/usr/bin/env python3
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Library to extract scenario definitions from scenario_config.py.
#
# Contains functions to filter, analyze and dump scenario definitions.
#
# This library is used in loadtest_config.py to generate the "scenariosJSON"
# field in the format accepted by the OSS benchmarks framework.
# See https://github.com/grpc/test-infra/blob/master/config/samples/cxx_example_loadtest.yaml
#
# It can also be used to dump scenarios to files, to count scenarios by
# language, and to export scenario languages in a format that can be used for
# automation.
#
# Example usage:
#
# scenario_config.py --export_scenarios -l cxx -f cxx_scenario_ -r '.*' \
# --category=scalable
#
# scenario_config.py --count_scenarios
#
# scenario_config.py --count_scenarios --category=scalable
#
# For usage of the language config output, see loadtest_config.py.
import argparse
import collections
import json
import re
import sys
from typing import Any, Callable, Dict, Iterable, NamedTuple
import scenario_config
# Language parameters for load test config generation.
class LanguageConfig(NamedTuple):
    """Language parameters for load test config generation."""

    category: str
    language: str
    client_language: str
    server_language: str
def category_string(categories: Iterable[str], category: str) -> str:
    """Converts a list of categories into a single string for counting."""
    if category != "all":
        # Count the scenario under the requested category only.
        return category if category in categories else ""
    # For "all", list the main categories first, then any remaining ones.
    primary = ("scalable", "smoketest")
    remaining = set(categories)
    ordered = [name for name in primary if name in remaining]
    remaining.difference_update(primary)
    ordered.extend(remaining)
    return " ".join(ordered)
def gen_scenario_languages(category: str) -> Iterable[LanguageConfig]:
    """Generates tuples containing the languages specified in each scenario."""
    for language, config in scenario_config.LANGUAGES.items():
        for scenario in config.scenarios():
            categories = scenario.get("CATEGORIES", [])
            # Skip scenarios outside the requested category (unless "all").
            if category != "all" and category not in categories:
                continue
            yield LanguageConfig(
                category=category_string(categories, category),
                language=language,
                client_language=scenario.get("CLIENT_LANGUAGE", ""),
                server_language=scenario.get("SERVER_LANGUAGE", ""),
            )
def scenario_filter(
    scenario_name_regex: str = ".*",
    category: str = "all",
    client_language: str = "",
    server_language: str = "",
) -> Callable[[Dict[str, Any]], bool]:
    """Returns a function to filter scenarios to process."""

    def filter_scenario(scenario: Dict[str, Any]) -> bool:
        """Filters scenarios that match specified criteria."""
        if not re.search(scenario_name_regex, scenario["name"]):
            return False
        # If the 'CATEGORIES' key is missing, treat scenario as part of
        # 'scalable' and 'smoketest'. This matches the behavior of
        # run_performance_tests.py.
        categories = scenario.get("CATEGORIES", ["scalable", "smoketest"])
        if category != "all" and category not in categories:
            return False
        if scenario.get("CLIENT_LANGUAGE", "") != client_language:
            return False
        if scenario.get("SERVER_LANGUAGE", "") != server_language:
            return False
        return True

    return filter_scenario
def gen_scenarios(
    language_name: str,
    scenario_filter_function: Callable[[Dict[str, Any]], bool],
) -> Iterable[Dict[str, Any]]:
    """Generates scenarios that match a given filter function."""
    all_scenarios = scenario_config.LANGUAGES[language_name].scenarios()
    # Lazily filter the language's scenarios, then strip non-proto fields.
    return (
        scenario_config.remove_nonproto_fields(scenario)
        for scenario in all_scenarios
        if scenario_filter_function(scenario)
    )
def dump_to_json_files(
    scenarios: Iterable[Dict[str, Any]], filename_prefix: str
) -> None:
    """Dumps a list of scenarios to JSON files."""
    count = 0
    for scenario in scenarios:
        filename = f"{filename_prefix}{scenario['name']}.json"
        print(f"Writing file {filename}", file=sys.stderr)
        with open(filename, "w") as outfile:
            # The dump file should have {"scenarios" : []} as the top level
            # element, when embedded in a LoadTest configuration YAML file.
            json.dump({"scenarios": [scenario]}, outfile, indent=2)
        count += 1
    print(f"Wrote {count} scenarios", file=sys.stderr)
def main() -> None:
    """Parses flags, then exports scenarios to files and/or counts them."""
    language_choices = sorted(scenario_config.LANGUAGES.keys())
    argp = argparse.ArgumentParser(description="Exports scenarios to files.")
    argp.add_argument(
        "--export_scenarios",
        action="store_true",
        help="Export scenarios to JSON files.",
    )
    argp.add_argument(
        "--count_scenarios",
        action="store_true",
        help="Count scenarios for all test languages.",
    )
    argp.add_argument(
        "-l", "--language", choices=language_choices, help="Language to export."
    )
    argp.add_argument(
        "-f",
        "--filename_prefix",
        default="scenario_dump_",
        type=str,
        help="Prefix for exported JSON file names.",
    )
    argp.add_argument(
        "-r",
        "--regex",
        default=".*",
        type=str,
        help="Regex to select scenarios to run.",
    )
    argp.add_argument(
        "--category",
        default="all",
        choices=["all", "inproc", "scalable", "smoketest", "sweep"],
        help="Select scenarios for a category of tests.",
    )
    argp.add_argument(
        "--client_language",
        default="",
        choices=language_choices,
        help="Select only scenarios with a specified client language.",
    )
    argp.add_argument(
        "--server_language",
        default="",
        choices=language_choices,
        help="Select only scenarios with a specified server language.",
    )
    args = argp.parse_args()
    # Exporting requires a language to select the scenario set.
    if args.export_scenarios and not args.language:
        print(
            "Dumping scenarios requires a specified language.", file=sys.stderr
        )
        argp.print_usage(file=sys.stderr)
        return
    if args.export_scenarios:
        s_filter = scenario_filter(
            scenario_name_regex=args.regex,
            category=args.category,
            client_language=args.client_language,
            server_language=args.server_language,
        )
        scenarios = gen_scenarios(args.language, s_filter)
        dump_to_json_files(scenarios, args.filename_prefix)
    if args.count_scenarios:
        # Print a per-(language, client, server, categories) count table.
        print(
            "Scenario count for all languages (category: {}):".format(
                args.category
            )
        )
        print(
            "{:>5} {:16} {:8} {:8} {}".format(
                "Count", "Language", "Client", "Server", "Categories"
            )
        )
        c = collections.Counter(gen_scenario_languages(args.category))
        total = 0
        for (cat, l, cl, sl), count in c.most_common():
            print(
                "{count:5} {l:16} {cl:8} {sl:8} {cat}".format(
                    l=l, cl=cl, sl=sl, count=count, cat=cat
                )
            )
            total += count
        print(
            "\n{:>5} total scenarios (category: {})".format(
                total, args.category
            )
        )
# Script entry point: export and/or count scenarios.
if __name__ == "__main__":
    main()
| 8,294
| 31.529412
| 93
|
py
|
grpc
|
grpc-master/tools/run_tests/performance/__init__.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 577
| 40.285714
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/performance/loadtest_config.py
|
#!/usr/bin/env python3
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to generate test configurations for the OSS benchmarks framework.
#
# This script filters test scenarios and generates uniquely named configurations
# for each test. Configurations are dumped in multipart YAML format.
#
# See documentation below:
# https://github.com/grpc/grpc/blob/master/tools/run_tests/performance/README.md#grpc-oss-benchmarks
import argparse
import collections
import copy
import datetime
import itertools
import json
import os
import string
import sys
from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Type
import yaml
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import scenario_config
import scenario_config_exporter
CONFIGURATION_FILE_HEADER_COMMENT = """
# Load test configurations generated from a template by loadtest_config.py.
# See documentation below:
# https://github.com/grpc/grpc/blob/master/tools/run_tests/performance/README.md#grpc-oss-benchmarks
"""
def safe_name(language: str) -> str:
    """Returns a name that is safe to use in labels and file names."""
    language_properties = scenario_config.LANGUAGES[language]
    return language_properties.safename
def default_prefix() -> str:
    """Constructs and returns a default prefix for LoadTest names."""
    # Fall back to a fixed prefix when $USER is not set.
    return os.getenv("USER", "loadtest")
def now_string() -> str:
    """Returns the current date and time in string format."""
    # Timestamp format: YYYYMMDDhhmmss, suitable for use in resource names.
    return format(datetime.datetime.now(), "%Y%m%d%H%M%S")
def validate_loadtest_name(name: str) -> None:
    """Validates that a LoadTest name is in the expected format.

    Args:
        name: Candidate metadata.name for a LoadTest resource.

    Raises:
        ValueError: If the name is too long, contains invalid characters,
            or does not start with a letter / ends with a hyphen.
    """
    if len(name) > 253:
        # Kubernetes object names are limited to 253 characters. The
        # previous message said "less than 253" although the check
        # permits exactly 253.
        raise ValueError(
            "LoadTest name must be at most 253 characters long: %s" % name
        )
    # Only lowercase alphanumerics and hyphens are allowed.
    if not all(c.isalnum() and not c.isupper() for c in name if c != "-"):
        raise ValueError("Invalid characters in LoadTest name: %s" % name)
    if not name or not name[0].isalpha() or name[-1] == "-":
        raise ValueError("Invalid format for LoadTest name: %s" % name)
def loadtest_base_name(
    scenario_name: str, uniquifier_elements: Iterable[str]
) -> str:
    """Constructs and returns the base name for a LoadTest resource."""
    # Scenario name words plus uniquifier elements, lowercased and
    # joined with hyphens.
    elements = scenario_name.split("_") + list(uniquifier_elements)
    return "-".join(element.lower() for element in elements)
def loadtest_name(
    prefix: str, scenario_name: str, uniquifier_elements: Iterable[str]
) -> str:
    """Constructs and returns a valid name for a LoadTest resource."""
    base_name = loadtest_base_name(scenario_name, uniquifier_elements)
    # Prepend the prefix only when one is given.
    parts = [prefix] if prefix else []
    parts.append(base_name)
    name = "-".join(parts)
    validate_loadtest_name(name)
    return name
def component_name(elements: Iterable[str]) -> str:
    """Constructs a component name from possibly empty elements."""
    # filter(None, ...) drops empty strings before joining.
    return "-".join(filter(None, elements))
def validate_annotations(annotations: Dict[str, str]) -> None:
    """Validates that annotations do not contain reserved names.

    These names are automatically added by the config generator.
    """
    reserved = {"scenario", "uniquifier"}
    names = reserved.intersection(annotations)
    if names:
        raise ValueError("Annotations contain reserved names: %s" % names)
def gen_run_indices(runs_per_test: int) -> Iterable[str]:
    """Generates run indices for multiple runs, as formatted strings."""
    if runs_per_test < 2:
        # A single run gets no index suffix.
        yield ""
        return
    # Zero-pad every index to the width of the largest index.
    width = len(str(runs_per_test - 1))
    for i in range(runs_per_test):
        yield format(i, "0{:d}d".format(width))
def scenario_name(
    base_name: str,
    client_channels: Optional[int],
    server_threads: Optional[int],
    offered_load: Optional[int],
):
    """Constructs scenario name from base name and modifiers."""
    # Append one suffix per modifier that is set (and non-zero).
    suffixes = (
        (client_channels, "channels"),
        (server_threads, "threads"),
        (offered_load, "load"),
    )
    elements = [base_name]
    for value, label in suffixes:
        if value:
            elements.append("{:d}{}".format(value, label))
    return "_".join(elements)
def scenario_transform_function(
    client_channels: Optional[int],
    server_threads: Optional[int],
    offered_loads: Optional[Iterable[int]],
) -> Optional[
    Callable[[Iterable[Mapping[str, Any]]], Iterable[Mapping[str, Any]]]
]:
    """Returns a transform to be applied to a list of scenarios.

    Args:
        client_channels: Override for the number of client channels.
        server_threads: Override for the number of async server threads.
        offered_loads: Offered loads; one scenario copy is generated per load.

    Returns:
        A function mapping an iterable of scenarios to the transformed
        scenarios, or the identity function when no overrides are given.
    """
    # Truthiness covers both None and an empty sequence; the previous
    # len(offered_loads) raised TypeError when offered_loads was None,
    # even though the parameter is annotated Optional.
    if not any((client_channels, server_threads, offered_loads)):
        return lambda s: s

    def _transform(
        scenarios: Iterable[Mapping[str, Any]]
    ) -> Iterable[Mapping[str, Any]]:
        """Transforms scenarios by inserting num of client channels, number of async_server_threads and offered_load."""
        for base_scenario in scenarios:
            base_name = base_scenario["name"]
            if client_channels:
                base_scenario["client_config"][
                    "client_channels"
                ] = client_channels
            if server_threads:
                base_scenario["server_config"][
                    "async_server_threads"
                ] = server_threads
            if not offered_loads:
                base_scenario["name"] = scenario_name(
                    base_name, client_channels, server_threads, 0
                )
                yield base_scenario
                # Move on to the next scenario; the previous `return` here
                # terminated the generator after the first scenario,
                # silently dropping all remaining scenarios.
                continue
            for offered_load in offered_loads:
                scenario = copy.deepcopy(base_scenario)
                scenario["client_config"]["load_params"] = {
                    "poisson": {"offered_load": offered_load}
                }
                scenario["name"] = scenario_name(
                    base_name, client_channels, server_threads, offered_load
                )
                yield scenario

    return _transform
def gen_loadtest_configs(
    base_config: Mapping[str, Any],
    base_config_clients: Iterable[Mapping[str, Any]],
    base_config_servers: Iterable[Mapping[str, Any]],
    scenario_name_regex: str,
    language_config: scenario_config_exporter.LanguageConfig,
    loadtest_name_prefix: str,
    uniquifier_elements: Iterable[str],
    annotations: Mapping[str, str],
    instances_per_client: int = 1,
    runs_per_test: int = 1,
    scenario_transform: Callable[
        [Iterable[Mapping[str, Any]]], List[Dict[str, Any]]
    ] = lambda s: s,
) -> Iterable[Dict[str, Any]]:
    """Generates LoadTest configurations for a given language config.

    The LoadTest configurations are generated as YAML objects.

    For every scenario matching the filter, and for every run index, a
    deep copy of base_config is populated with a unique name, labels,
    annotations, the selected clients/servers, a driver, and the scenario
    JSON, then yielded.

    Raises:
        IndexError: If no client/server with the required language exists
            in the template.
        ValueError: If annotations contain reserved names, or multiple
            client instances are requested for clients without unique names.
    """
    validate_annotations(annotations)
    prefix = loadtest_name_prefix or default_prefix()
    # Client/server languages default to the scenario language.
    cl = safe_name(language_config.client_language or language_config.language)
    sl = safe_name(language_config.server_language or language_config.language)
    scenario_filter = scenario_config_exporter.scenario_filter(
        scenario_name_regex=scenario_name_regex,
        category=language_config.category,
        client_language=language_config.client_language,
        server_language=language_config.server_language,
    )
    scenarios = scenario_transform(
        scenario_config_exporter.gen_scenarios(
            language_config.language, scenario_filter
        )
    )
    for scenario in scenarios:
        for run_index in gen_run_indices(runs_per_test):
            # NOTE(review): `+ [run_index]` requires uniquifier_elements to
            # be a list despite the Iterable annotation — confirm callers.
            uniq = (
                uniquifier_elements + [run_index]
                if run_index
                else uniquifier_elements
            )
            name = loadtest_name(prefix, scenario["name"], uniq)
            # NOTE(review): dump_to_json_files wraps the scenario in a list
            # ({"scenarios": [scenario]}); here it is embedded bare —
            # confirm the consumer expects this shape.
            scenario_str = (
                json.dumps({"scenarios": scenario}, indent="  ") + "\n"
            )
            config = copy.deepcopy(base_config)
            metadata = config["metadata"]
            metadata["name"] = name
            if "labels" not in metadata:
                metadata["labels"] = dict()
            metadata["labels"]["language"] = safe_name(language_config.language)
            metadata["labels"]["prefix"] = prefix
            if "annotations" not in metadata:
                metadata["annotations"] = dict()
            metadata["annotations"].update(annotations)
            # Reserved annotations, always set by the generator.
            metadata["annotations"].update(
                {
                    "scenario": scenario["name"],
                    "uniquifier": "-".join(uniq),
                }
            )
            spec = config["spec"]
            # Select clients with the required language.
            clients = [
                client
                for client in base_config_clients
                if client["language"] == cl
            ]
            if not clients:
                raise IndexError(
                    "Client language not found in template: %s" % cl
                )
            # Validate config for additional client instances.
            if instances_per_client > 1:
                c = collections.Counter(
                    (client.get("name", "") for client in clients)
                )
                if max(c.values()) > 1:
                    raise ValueError(
                        "Multiple instances of multiple clients requires "
                        "unique names, name counts for language %s: %s"
                        % (cl, c.most_common())
                    )
            # Name client instances with an index starting from zero.
            client_instances = []
            for i in range(instances_per_client):
                client_instances.extend(copy.deepcopy(clients))
                # Only the instances appended in this iteration get index i.
                for client in client_instances[-len(clients) :]:
                    client["name"] = component_name(
                        (client.get("name", ""), str(i))
                    )
            # Set clients to named instances.
            spec["clients"] = client_instances
            # Select servers with the required language.
            servers = copy.deepcopy(
                [
                    server
                    for server in base_config_servers
                    if server["language"] == sl
                ]
            )
            if not servers:
                raise IndexError(
                    "Server language not found in template: %s" % sl
                )
            # Name servers with an index for consistency with clients.
            for i, server in enumerate(servers):
                server["name"] = component_name(
                    (server.get("name", ""), str(i))
                )
            # Set servers to named instances.
            spec["servers"] = servers
            # Add driver, if needed.
            if "driver" not in spec:
                spec["driver"] = dict()
            # Ensure driver has language and run fields.
            driver = spec["driver"]
            if "language" not in driver:
                driver["language"] = safe_name("c++")
            if "run" not in driver:
                driver["run"] = dict()
            # Name the driver with an index for consistency with workers.
            # There is only one driver, so the index is zero.
            if "name" not in driver or not driver["name"]:
                driver["name"] = "0"
            spec["scenariosJSON"] = scenario_str
            yield config
def parse_key_value_args(args: Optional[Iterable[str]]) -> Dict[str, str]:
    """Parses arguments in the form key=value into a dictionary.

    Args:
        args: An iterable of strings of the form "key=value", or None.

    Returns:
        A dict mapping each key to its value. Values may contain "="
        themselves; only the first "=" is treated as the separator.

    Raises:
        ValueError: If an argument does not contain an "=" separator.
    """
    d = dict()
    if args is None:
        return d
    for arg in args:
        key, equals, value = arg.partition("=")
        if equals != "=":
            # Report the offending argument itself. Note `value` is always
            # the empty string here, because str.partition() returns
            # (arg, "", "") when the separator is not found.
            raise ValueError("Expected key=value: " + arg)
        d[key] = value
    return d
def clear_empty_fields(config: Dict[str, Any]) -> None:
    """Removes fields that were emptied out by template substitution.

    Mutates *config* in place: drops empty "pool" entries from clients,
    servers and the driver, drops an empty driver run image, and drops the
    results section when no usable BigQuery table is configured.
    """
    spec = config["spec"]
    # Clients and servers are handled identically: strip empty pools.
    for role in ("clients", "servers"):
        for component in spec.get(role, ()):
            if "pool" in component and not component["pool"]:
                del component["pool"]
    if "driver" in spec:
        driver = spec["driver"]
        if "pool" in driver and not driver["pool"]:
            del driver["pool"]
        run_config = driver.get("run", {})
        if "image" in run_config and not run_config["image"]:
            del run_config["image"]
    # A results section without a non-empty bigQueryTable is useless.
    if "results" in spec and not spec["results"].get("bigQueryTable"):
        del spec["results"]
def config_dumper(header_comment: str) -> Type[yaml.SafeDumper]:
    """Returns a custom dumper to dump configurations in the expected format.

    The returned class is a yaml.SafeDumper subclass that (a) writes
    *header_comment* at the very start of the output stream and (b) renders
    multi-line strings in literal block style ("|").
    """
    class ConfigDumper(yaml.SafeDumper):
        # Hook the stream-start event to emit the header comment before any
        # document content is written.
        def expect_stream_start(self):
            super().expect_stream_start()
            if isinstance(self.event, yaml.StreamStartEvent):
                self.write_indent()
                self.write_indicator(header_comment, need_whitespace=False)
    def str_presenter(dumper, data):
        # Multi-line strings use the "|" literal block style for
        # readability; single-line strings keep the default style.
        if "\n" in data:
            return dumper.represent_scalar(
                "tag:yaml.org,2002:str", data, style="|"
            )
        return dumper.represent_scalar("tag:yaml.org,2002:str", data)
    ConfigDumper.add_representer(str, str_presenter)
    return ConfigDumper
def main() -> None:
    """Generates LoadTest configs from a YAML template and CLI options."""
    language_choices = sorted(scenario_config.LANGUAGES.keys())
    argp = argparse.ArgumentParser(
        description="Generates load test configs from a template.",
        fromfile_prefix_chars="@",
    )
    argp.add_argument(
        "-l",
        "--language",
        action="append",
        choices=language_choices,
        required=True,
        help="Language(s) to benchmark.",
        dest="languages",
    )
    argp.add_argument(
        "-t",
        "--template",
        type=str,
        required=True,
        help="LoadTest configuration yaml file template.",
    )
    argp.add_argument(
        "-s",
        "--substitution",
        action="append",
        default=[],
        help="Template substitution(s), in the form key=value.",
        dest="substitutions",
    )
    argp.add_argument(
        "-p", "--prefix", default="", type=str, help="Test name prefix."
    )
    argp.add_argument(
        "-u",
        "--uniquifier_element",
        action="append",
        default=[],
        help="String element(s) to make the test name unique.",
        dest="uniquifier_elements",
    )
    argp.add_argument(
        "-d",
        action="store_true",
        help="Use creation date and time as an additional uniquifier element.",
    )
    argp.add_argument(
        "-a",
        "--annotation",
        action="append",
        default=[],
        help="metadata.annotation(s), in the form key=value.",
        dest="annotations",
    )
    argp.add_argument(
        "-r",
        "--regex",
        default=".*",
        type=str,
        help="Regex to select scenarios to run.",
    )
    argp.add_argument(
        "--category",
        choices=["all", "inproc", "scalable", "smoketest", "sweep", "psm"],
        default="all",
        help="Select a category of tests to run.",
    )
    argp.add_argument(
        "--allow_client_language",
        action="append",
        choices=language_choices,
        default=[],
        help="Allow cross-language scenarios with this client language.",
        dest="allow_client_languages",
    )
    argp.add_argument(
        "--allow_server_language",
        action="append",
        choices=language_choices,
        default=[],
        help="Allow cross-language scenarios with this server language.",
        dest="allow_server_languages",
    )
    argp.add_argument(
        "--instances_per_client",
        default=1,
        type=int,
        help="Number of instances to generate for each client.",
    )
    argp.add_argument(
        "--runs_per_test",
        default=1,
        type=int,
        help="Number of copies to generate for each test.",
    )
    argp.add_argument(
        "-o",
        "--output",
        type=str,
        help="Output file name. Output to stdout if not set.",
    )
    argp.add_argument(
        "--client_channels", type=int, help="Number of client channels."
    )
    argp.add_argument(
        "--server_threads", type=int, help="Number of async server threads."
    )
    argp.add_argument(
        "--offered_loads",
        nargs="*",
        type=int,
        default=[],
        help=(
            "A list of QPS values at which each load test scenario will be run."
        ),
    )
    args = argp.parse_args()
    # Basic validation of numeric options.
    if args.instances_per_client < 1:
        argp.error("instances_per_client must be greater than zero.")
    if args.runs_per_test < 1:
        argp.error("runs_per_test must be greater than zero.")
    # Config generation ignores environment variables that are passed by the
    # controller at runtime.
    substitutions = {
        "DRIVER_PORT": "${DRIVER_PORT}",
        "KILL_AFTER": "${KILL_AFTER}",
        "POD_TIMEOUT": "${POD_TIMEOUT}",
    }
    # The user can override the ignored variables above by passing them in as
    # substitution keys.
    substitutions.update(parse_key_value_args(args.substitutions))
    uniquifier_elements = args.uniquifier_elements
    if args.d:
        uniquifier_elements.append(now_string())
    annotations = parse_key_value_args(args.annotations)
    transform = scenario_transform_function(
        args.client_channels, args.server_threads, args.offered_loads
    )
    # Apply the string substitutions before parsing the template as YAML.
    with open(args.template) as f:
        base_config = yaml.safe_load(
            string.Template(f.read()).substitute(substitutions)
        )
    clear_empty_fields(base_config)
    spec = base_config["spec"]
    # Clients and servers are selected per language; pull them out of the
    # shared spec so gen_loadtest_configs() can re-add named instances.
    base_config_clients = spec["clients"]
    del spec["clients"]
    base_config_servers = spec["servers"]
    del spec["servers"]
    # The empty string represents "same language as the test language".
    client_languages = [""] + args.allow_client_languages
    server_languages = [""] + args.allow_server_languages
    config_generators = []
    for l, cl, sl in itertools.product(
        args.languages, client_languages, server_languages
    ):
        language_config = scenario_config_exporter.LanguageConfig(
            category=args.category,
            language=l,
            client_language=cl,
            server_language=sl,
        )
        config_generators.append(
            gen_loadtest_configs(
                base_config,
                base_config_clients,
                base_config_servers,
                args.regex,
                language_config,
                loadtest_name_prefix=args.prefix,
                uniquifier_elements=uniquifier_elements,
                annotations=annotations,
                instances_per_client=args.instances_per_client,
                runs_per_test=args.runs_per_test,
                scenario_transform=transform,
            )
        )
    configs = (config for config in itertools.chain(*config_generators))
    # Dump all generated configs into a single multi-document YAML stream,
    # prefixed with the standard header comment.
    # NOTE(review): when writing to stdout, the `with` block presumably
    # closes sys.stdout on exit — acceptable only because the program ends
    # right after; confirm if this code is ever reused as a library.
    with open(args.output, "w") if args.output else sys.stdout as f:
        yaml.dump_all(
            configs,
            stream=f,
            Dumper=config_dumper(CONFIGURATION_FILE_HEADER_COMMENT.strip()),
            default_flow_style=False,
        )
if __name__ == "__main__":
    main()
| 19,868
| 32.281407
| 120
|
py
|
grpc
|
grpc-master/tools/run_tests/performance/scenario_config.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# performance scenario configuration for various languages
import math
# Warmup and measurement durations, in seconds.
WARMUP_SECONDS = 5
JAVA_WARMUP_SECONDS = 15  # Java needs more warmup time for JIT to kick in.
BENCHMARK_SECONDS = 30
# Scenario category tags; each scenario lists the categories it belongs to
# and runners filter on them.
SMOKETEST = "smoketest"
SCALABLE = "scalable"
INPROC = "inproc"
SWEEP = "sweep"
PSM = "psm"
# Categories assigned to a scenario when none are given explicitly.
DEFAULT_CATEGORIES = (SCALABLE, SMOKETEST)
# Security params shared by all secure scenarios (test CA + host override).
SECURE_SECARGS = {
    "use_test_ca": True,
    "server_host_override": "foo.test.google.fr",
}
# Latency histogram settings; max_possible is presumably in nanoseconds
# (60e9 ~= 60s) — TODO confirm against the histogram proto.
HISTOGRAM_PARAMS = {
    "resolution": 0.01,
    "max_possible": 60e9,
}
# target number of RPCs outstanding on across all client channels in
# non-ping-pong tests (since we can only specify per-channel numbers, the
# actual target will be slightly higher)
OUTSTANDING_REQUESTS = {"async": 6400, "async-limited": 800, "sync": 1000}
# wide is the number of client channels in multi-channel tests (1 otherwise)
WIDE = 64
def _get_secargs(is_secure):
if is_secure:
return SECURE_SECARGS
else:
return None
def remove_nonproto_fields(scenario):
    """Strips bookkeeping fields that have no ScenarioConfig proto equivalent.

    The uppercase keys carry extra scheduling information about the scenario
    and must be removed before the dict is serialized as a protobuf message.
    Returns the (mutated) scenario for convenient chaining.
    """
    for extra_key in (
        "CATEGORIES",
        "CLIENT_LANGUAGE",
        "SERVER_LANGUAGE",
        "EXCLUDED_POLL_ENGINES",
    ):
        scenario.pop(extra_key, None)
    return scenario
def geometric_progression(start, stop, step):
    """Yields rounded terms of a geometric series while below *stop*."""
    value = start
    while value < stop:
        yield int(round(value))
        value *= step
def _payload_type(use_generic_payload, req_size, resp_size):
r = {}
sizes = {
"req_size": req_size,
"resp_size": resp_size,
}
if use_generic_payload:
r["bytebuf_params"] = sizes
else:
r["simple_params"] = sizes
return r
def _load_params(offered_load):
r = {}
if offered_load is None:
r["closed_loop"] = {}
else:
load = {}
load["offered_load"] = offered_load
r["poisson"] = load
return r
def _add_channel_arg(config, key, value):
if "channel_args" in config:
channel_args = config["channel_args"]
else:
channel_args = []
config["channel_args"] = channel_args
arg = {"name": key}
if isinstance(value, int):
arg["int_value"] = value
else:
arg["str_value"] = value
channel_args.append(arg)
def _ping_pong_scenario(
    name,
    rpc_type,
    client_type,
    server_type,
    secure=True,
    use_generic_payload=False,
    req_size=0,
    resp_size=0,
    unconstrained_client=None,
    client_language=None,
    server_language=None,
    async_server_threads=0,
    client_processes=0,
    server_processes=0,
    server_threads_per_cq=0,
    client_threads_per_cq=0,
    warmup_seconds=WARMUP_SECONDS,
    categories=None,
    channels=None,
    outstanding=None,
    num_clients=None,
    resource_quota_size=None,
    messages_per_stream=None,
    excluded_poll_engines=None,
    minimal_stack=False,
    offered_load=None,
):
    """Creates a basic ping pong scenario.

    Returns a scenario dict shaped like the ScenarioConfig proto, plus the
    uppercase bookkeeping keys (CATEGORIES, CLIENT_LANGUAGE, SERVER_LANGUAGE,
    EXCLUDED_POLL_ENGINES) that remove_nonproto_fields() strips later.
    When `unconstrained_client` is set, the scenario becomes a throughput
    test with many outstanding RPCs across `channels` channels; otherwise it
    is a single-channel, single-RPC latency (ping-pong) test.
    """
    scenario = {
        "name": name,
        "num_servers": 1,
        "num_clients": 1,
        "client_config": {
            "client_type": client_type,
            "security_params": _get_secargs(secure),
            "outstanding_rpcs_per_channel": 1,
            "client_channels": 1,
            "async_client_threads": 1,
            "client_processes": client_processes,
            "threads_per_cq": client_threads_per_cq,
            "rpc_type": rpc_type,
            "histogram_params": HISTOGRAM_PARAMS,
            "channel_args": [],
        },
        "server_config": {
            "server_type": server_type,
            "security_params": _get_secargs(secure),
            "async_server_threads": async_server_threads,
            "server_processes": server_processes,
            "threads_per_cq": server_threads_per_cq,
            "channel_args": [],
        },
        "warmup_seconds": warmup_seconds,
        "benchmark_seconds": BENCHMARK_SECONDS,
        "CATEGORIES": list(DEFAULT_CATEGORIES),
        "EXCLUDED_POLL_ENGINES": [],
    }
    if resource_quota_size:
        scenario["server_config"]["resource_quota_size"] = resource_quota_size
    if use_generic_payload:
        # Generic (bytebuf) payloads are only supported by the generic server.
        if server_type != "ASYNC_GENERIC_SERVER":
            raise Exception("Use ASYNC_GENERIC_SERVER for generic payload.")
        scenario["server_config"]["payload_config"] = _payload_type(
            use_generic_payload, req_size, resp_size
        )
    scenario["client_config"]["payload_config"] = _payload_type(
        use_generic_payload, req_size, resp_size
    )
    # Optimization target of 'throughput' does not work well with epoll1 polling
    # engine. Use the default value of 'blend'
    optimization_target = "throughput"
    if unconstrained_client:
        # Throughput mode: spread `outstanding_calls` RPCs over `wide`
        # channels, each channel carrying `deep` concurrent RPCs.
        outstanding_calls = (
            outstanding
            if outstanding is not None
            else OUTSTANDING_REQUESTS[unconstrained_client]
        )
        # clamp buffer usage to something reasonable (16 gig for now)
        MAX_MEMORY_USE = 16 * 1024 * 1024 * 1024
        if outstanding_calls * max(req_size, resp_size) > MAX_MEMORY_USE:
            # NOTE(review): true division yields a float here; `deep` below
            # rounds up via ceil, so the clamp is approximate by design.
            outstanding_calls = max(
                1, MAX_MEMORY_USE / max(req_size, resp_size)
            )
        wide = channels if channels is not None else WIDE
        deep = int(math.ceil(1.0 * outstanding_calls / wide))
        scenario["num_clients"] = (
            num_clients if num_clients is not None else 0
        )  # use as many clients as available.
        scenario["client_config"]["outstanding_rpcs_per_channel"] = deep
        scenario["client_config"]["client_channels"] = wide
        scenario["client_config"]["async_client_threads"] = 0
        if offered_load is not None:
            optimization_target = "latency"
    else:
        # Ping-pong mode: one channel, one RPC at a time, optimize latency.
        scenario["client_config"]["outstanding_rpcs_per_channel"] = 1
        scenario["client_config"]["client_channels"] = 1
        scenario["client_config"]["async_client_threads"] = 1
        optimization_target = "latency"
    scenario["client_config"]["load_params"] = _load_params(offered_load)
    optimization_channel_arg = {
        "name": "grpc.optimization_target",
        "str_value": optimization_target,
    }
    scenario["client_config"]["channel_args"].append(optimization_channel_arg)
    scenario["server_config"]["channel_args"].append(optimization_channel_arg)
    if minimal_stack:
        _add_channel_arg(scenario["client_config"], "grpc.minimal_stack", 1)
        _add_channel_arg(scenario["server_config"], "grpc.minimal_stack", 1)
    if messages_per_stream:
        scenario["client_config"]["messages_per_stream"] = messages_per_stream
    if client_language:
        # the CLIENT_LANGUAGE field is recognized by run_performance_tests.py
        scenario["CLIENT_LANGUAGE"] = client_language
    if server_language:
        # the SERVER_LANGUAGE field is recognized by run_performance_tests.py
        scenario["SERVER_LANGUAGE"] = server_language
    if categories:
        scenario["CATEGORIES"] = categories
    if excluded_poll_engines:
        # The polling engines for which this scenario is excluded
        scenario["EXCLUDED_POLL_ENGINES"] = excluded_poll_engines
    return scenario
class Language(object):
    """Base class for per-language benchmark scenario definitions."""
    @property
    def safename(self):
        """Name safe for filenames/resources; defaults to str(self)."""
        return "%s" % self
class CXXLanguage(Language):
    """Scenario definitions for the C++ benchmark workers."""
    @property
    def safename(self):
        # "c++" is not filename-safe; use "cxx" instead.
        return "cxx"
    def worker_cmdline(self):
        """Command line to start a C++ QPS worker."""
        return ["cmake/build/qps_worker"]
    def worker_port_offset(self):
        """Per-language port offset to avoid worker port collisions."""
        return 0
    def scenarios(self):
        """Yields all C++ scenario dicts (see _ping_pong_scenario)."""
        yield _ping_pong_scenario(
            "cpp_protobuf_async_unary_5000rpcs_1KB_psm",
            rpc_type="UNARY",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            req_size=1024,
            resp_size=1024,
            outstanding=5000,
            channels=1,
            num_clients=1,
            secure=False,
            async_server_threads=1,
            categories=[PSM],
        )
        # TODO(ctiller): add 70% load latency test
        yield _ping_pong_scenario(
            "cpp_protobuf_async_unary_1channel_100rpcs_1MB",
            rpc_type="UNARY",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            req_size=1024 * 1024,
            resp_size=1024 * 1024,
            unconstrained_client="async",
            outstanding=100,
            channels=1,
            num_clients=1,
            secure=False,
            categories=[SWEEP],
        )
        yield _ping_pong_scenario(
            "cpp_protobuf_async_streaming_from_client_1channel_1MB",
            rpc_type="STREAMING_FROM_CLIENT",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            req_size=1024 * 1024,
            resp_size=1024 * 1024,
            unconstrained_client="async",
            outstanding=1,
            channels=1,
            num_clients=1,
            secure=False,
            categories=[SWEEP],
        )
        # Scenario was added in https://github.com/grpc/grpc/pull/12987, but its purpose is unclear
        # (beyond excercising some params that other scenarios don't)
        yield _ping_pong_scenario(
            "cpp_protobuf_async_unary_75Kqps_600channel_60Krpcs_300Breq_50Bresp",
            rpc_type="UNARY",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            req_size=300,
            resp_size=50,
            unconstrained_client="async",
            outstanding=30000,
            channels=300,
            offered_load=37500,
            secure=False,
            async_server_threads=16,
            server_threads_per_cq=1,
            categories=[SCALABLE],
        )
        # Generate secure and insecure variants of the remaining scenarios.
        for secure in [True, False]:
            secstr = "secure" if secure else "insecure"
            smoketest_categories = [SMOKETEST] if secure else []
            inproc_categories = [INPROC] if not secure else []
            yield _ping_pong_scenario(
                "cpp_generic_async_streaming_ping_pong_%s" % secstr,
                rpc_type="STREAMING",
                client_type="ASYNC_CLIENT",
                server_type="ASYNC_GENERIC_SERVER",
                use_generic_payload=True,
                async_server_threads=1,
                secure=secure,
                categories=smoketest_categories
                + inproc_categories
                + [SCALABLE],
            )
            yield _ping_pong_scenario(
                "cpp_generic_async_streaming_qps_unconstrained_%s" % secstr,
                rpc_type="STREAMING",
                client_type="ASYNC_CLIENT",
                server_type="ASYNC_GENERIC_SERVER",
                unconstrained_client="async",
                use_generic_payload=True,
                secure=secure,
                client_threads_per_cq=2,
                server_threads_per_cq=2,
                minimal_stack=not secure,
                categories=smoketest_categories
                + inproc_categories
                + [SCALABLE],
            )
            for mps in geometric_progression(10, 20, 10):
                yield _ping_pong_scenario(
                    "cpp_generic_async_streaming_qps_unconstrained_%smps_%s"
                    % (mps, secstr),
                    rpc_type="STREAMING",
                    client_type="ASYNC_CLIENT",
                    server_type="ASYNC_GENERIC_SERVER",
                    unconstrained_client="async",
                    use_generic_payload=True,
                    secure=secure,
                    messages_per_stream=mps,
                    minimal_stack=not secure,
                    categories=smoketest_categories
                    + inproc_categories
                    + [SCALABLE],
                )
            for mps in geometric_progression(1, 200, math.sqrt(10)):
                yield _ping_pong_scenario(
                    "cpp_generic_async_streaming_qps_unconstrained_%smps_%s"
                    % (mps, secstr),
                    rpc_type="STREAMING",
                    client_type="ASYNC_CLIENT",
                    server_type="ASYNC_GENERIC_SERVER",
                    unconstrained_client="async",
                    use_generic_payload=True,
                    secure=secure,
                    messages_per_stream=mps,
                    minimal_stack=not secure,
                    categories=[SWEEP],
                )
            yield _ping_pong_scenario(
                "cpp_generic_async_streaming_qps_1channel_1MBmsg_%s" % secstr,
                rpc_type="STREAMING",
                req_size=1024 * 1024,
                resp_size=1024 * 1024,
                client_type="ASYNC_CLIENT",
                server_type="ASYNC_GENERIC_SERVER",
                unconstrained_client="async",
                use_generic_payload=True,
                secure=secure,
                minimal_stack=not secure,
                categories=inproc_categories + [SCALABLE],
                channels=1,
                outstanding=100,
            )
            yield _ping_pong_scenario(
                "cpp_generic_async_streaming_qps_unconstrained_64KBmsg_%s"
                % secstr,
                rpc_type="STREAMING",
                req_size=64 * 1024,
                resp_size=64 * 1024,
                client_type="ASYNC_CLIENT",
                server_type="ASYNC_GENERIC_SERVER",
                unconstrained_client="async",
                use_generic_payload=True,
                secure=secure,
                minimal_stack=not secure,
                categories=inproc_categories + [SCALABLE],
            )
            yield _ping_pong_scenario(
                "cpp_generic_async_streaming_qps_unconstrained_1cq_%s" % secstr,
                rpc_type="STREAMING",
                client_type="ASYNC_CLIENT",
                server_type="ASYNC_GENERIC_SERVER",
                unconstrained_client="async-limited",
                use_generic_payload=True,
                secure=secure,
                client_threads_per_cq=1000000,
                server_threads_per_cq=1000000,
                categories=[SWEEP],
            )
            yield _ping_pong_scenario(
                "cpp_protobuf_async_streaming_qps_unconstrained_1cq_%s"
                % secstr,
                rpc_type="STREAMING",
                client_type="ASYNC_CLIENT",
                server_type="ASYNC_SERVER",
                unconstrained_client="async-limited",
                secure=secure,
                client_threads_per_cq=1000000,
                server_threads_per_cq=1000000,
                categories=inproc_categories + [SCALABLE],
            )
            yield _ping_pong_scenario(
                "cpp_protobuf_async_unary_qps_unconstrained_1cq_%s" % secstr,
                rpc_type="UNARY",
                client_type="ASYNC_CLIENT",
                server_type="ASYNC_SERVER",
                unconstrained_client="async-limited",
                secure=secure,
                client_threads_per_cq=1000000,
                server_threads_per_cq=1000000,
                categories=inproc_categories + [SCALABLE],
            )
            yield _ping_pong_scenario(
                "cpp_generic_async_streaming_qps_one_server_core_%s" % secstr,
                rpc_type="STREAMING",
                client_type="ASYNC_CLIENT",
                server_type="ASYNC_GENERIC_SERVER",
                unconstrained_client="async-limited",
                use_generic_payload=True,
                async_server_threads=1,
                minimal_stack=not secure,
                secure=secure,
                categories=[SWEEP],
            )
            yield _ping_pong_scenario(
                "cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_%s"
                % (secstr),
                rpc_type="UNARY",
                client_type="ASYNC_CLIENT",
                server_type="SYNC_SERVER",
                unconstrained_client="async",
                secure=secure,
                minimal_stack=not secure,
                categories=smoketest_categories
                + inproc_categories
                + [SCALABLE],
            )
            yield _ping_pong_scenario(
                "cpp_protobuf_async_client_unary_1channel_64wide_128Breq_8MBresp_%s"
                % (secstr),
                rpc_type="UNARY",
                client_type="ASYNC_CLIENT",
                server_type="ASYNC_SERVER",
                channels=1,
                outstanding=64,
                req_size=128,
                resp_size=8 * 1024 * 1024,
                secure=secure,
                minimal_stack=not secure,
                categories=inproc_categories + [SCALABLE],
            )
            yield _ping_pong_scenario(
                "cpp_protobuf_async_client_sync_server_streaming_qps_unconstrained_%s"
                % secstr,
                rpc_type="STREAMING",
                client_type="ASYNC_CLIENT",
                server_type="SYNC_SERVER",
                unconstrained_client="async",
                secure=secure,
                minimal_stack=not secure,
                categories=[SWEEP],
            )
            yield _ping_pong_scenario(
                "cpp_protobuf_async_unary_ping_pong_%s_1MB" % secstr,
                rpc_type="UNARY",
                client_type="ASYNC_CLIENT",
                server_type="ASYNC_SERVER",
                req_size=1024 * 1024,
                resp_size=1024 * 1024,
                secure=secure,
                minimal_stack=not secure,
                categories=smoketest_categories
                + inproc_categories
                + [SCALABLE],
            )
            # Cross product of rpc type x sync/async for the generic
            # ping-pong and qps_unconstrained families.
            for rpc_type in [
                "unary",
                "streaming",
                "streaming_from_client",
                "streaming_from_server",
            ]:
                for synchronicity in ["sync", "async"]:
                    yield _ping_pong_scenario(
                        "cpp_protobuf_%s_%s_ping_pong_%s"
                        % (synchronicity, rpc_type, secstr),
                        rpc_type=rpc_type.upper(),
                        client_type="%s_CLIENT" % synchronicity.upper(),
                        server_type="%s_SERVER" % synchronicity.upper(),
                        async_server_threads=1,
                        minimal_stack=not secure,
                        secure=secure,
                    )
                    for size in geometric_progression(
                        1, 1024 * 1024 * 1024 + 1, 8
                    ):
                        yield _ping_pong_scenario(
                            "cpp_protobuf_%s_%s_qps_unconstrained_%s_%db"
                            % (synchronicity, rpc_type, secstr, size),
                            rpc_type=rpc_type.upper(),
                            req_size=size,
                            resp_size=size,
                            client_type="%s_CLIENT" % synchronicity.upper(),
                            server_type="%s_SERVER" % synchronicity.upper(),
                            unconstrained_client=synchronicity,
                            secure=secure,
                            minimal_stack=not secure,
                            categories=[SWEEP],
                        )
                    maybe_scalable = [SCALABLE]
                    if (
                        rpc_type == "streaming_from_server"
                        and synchronicity == "async"
                        and secure
                    ):
                        # protobuf_async_streaming_from_server_qps_unconstrained_secure is very flaky
                        # and has extremely high variance so running it isn't really useful.
                        # see b/198275705
                        maybe_scalable = [SWEEP]
                    yield _ping_pong_scenario(
                        "cpp_protobuf_%s_%s_qps_unconstrained_%s"
                        % (synchronicity, rpc_type, secstr),
                        rpc_type=rpc_type.upper(),
                        client_type="%s_CLIENT" % synchronicity.upper(),
                        server_type="%s_SERVER" % synchronicity.upper(),
                        unconstrained_client=synchronicity,
                        secure=secure,
                        minimal_stack=not secure,
                        server_threads_per_cq=2,
                        client_threads_per_cq=2,
                        categories=inproc_categories + maybe_scalable,
                    )
                    # TODO(vjpai): Re-enable this test. It has a lot of timeouts
                    # and hasn't yet been conclusively identified as a test failure
                    # or race in the library
                    # yield _ping_pong_scenario(
                    #     'cpp_protobuf_%s_%s_qps_unconstrained_%s_500kib_resource_quota' % (synchronicity, rpc_type, secstr),
                    #     rpc_type=rpc_type.upper(),
                    #     client_type='%s_CLIENT' % synchronicity.upper(),
                    #     server_type='%s_SERVER' % synchronicity.upper(),
                    #     unconstrained_client=synchronicity,
                    #     secure=secure,
                    #     categories=smoketest_categories+[SCALABLE],
                    #     resource_quota_size=500*1024)
                    if rpc_type == "streaming":
                        for mps in geometric_progression(10, 20, 10):
                            yield _ping_pong_scenario(
                                "cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s"
                                % (synchronicity, rpc_type, mps, secstr),
                                rpc_type=rpc_type.upper(),
                                client_type="%s_CLIENT" % synchronicity.upper(),
                                server_type="%s_SERVER" % synchronicity.upper(),
                                unconstrained_client=synchronicity,
                                secure=secure,
                                messages_per_stream=mps,
                                minimal_stack=not secure,
                                categories=inproc_categories + [SCALABLE],
                            )
                        for mps in geometric_progression(1, 200, math.sqrt(10)):
                            yield _ping_pong_scenario(
                                "cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s"
                                % (synchronicity, rpc_type, mps, secstr),
                                rpc_type=rpc_type.upper(),
                                client_type="%s_CLIENT" % synchronicity.upper(),
                                server_type="%s_SERVER" % synchronicity.upper(),
                                unconstrained_client=synchronicity,
                                secure=secure,
                                messages_per_stream=mps,
                                minimal_stack=not secure,
                                categories=[SWEEP],
                            )
                    # Sweep over channel/outstanding-RPC combinations; skip
                    # infeasible combos (too many sync RPCs, fewer RPCs than
                    # channels).
                    for channels in geometric_progression(
                        1, 20000, math.sqrt(10)
                    ):
                        for outstanding in geometric_progression(
                            1, 200000, math.sqrt(10)
                        ):
                            if synchronicity == "sync" and outstanding > 1200:
                                continue
                            if outstanding < channels:
                                continue
                            yield _ping_pong_scenario(
                                "cpp_protobuf_%s_%s_qps_unconstrained_%s_%d_channels_%d_outstanding"
                                % (
                                    synchronicity,
                                    rpc_type,
                                    secstr,
                                    channels,
                                    outstanding,
                                ),
                                rpc_type=rpc_type.upper(),
                                client_type="%s_CLIENT" % synchronicity.upper(),
                                server_type="%s_SERVER" % synchronicity.upper(),
                                unconstrained_client=synchronicity,
                                secure=secure,
                                minimal_stack=not secure,
                                categories=[SWEEP],
                                channels=channels,
                                outstanding=outstanding,
                            )
    def __str__(self):
        return "c++"
class CSharpLanguage(Language):
    """The legacy Grpc.Core implementation from grpc/grpc."""
    def worker_cmdline(self):
        """Command line to start a C# QPS worker."""
        return ["tools/run_tests/performance/run_worker_csharp.sh"]
    def worker_port_offset(self):
        """Per-language port offset to avoid worker port collisions."""
        return 100
    def scenarios(self):
        """Yields all C# scenario dicts (see _ping_pong_scenario)."""
        yield _ping_pong_scenario(
            "csharp_generic_async_streaming_ping_pong",
            rpc_type="STREAMING",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_GENERIC_SERVER",
            use_generic_payload=True,
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "csharp_generic_async_streaming_ping_pong_insecure_1MB",
            rpc_type="STREAMING",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_GENERIC_SERVER",
            req_size=1024 * 1024,
            resp_size=1024 * 1024,
            use_generic_payload=True,
            secure=False,
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "csharp_generic_async_streaming_qps_unconstrained_insecure",
            rpc_type="STREAMING",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_GENERIC_SERVER",
            unconstrained_client="async",
            use_generic_payload=True,
            secure=False,
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "csharp_protobuf_async_streaming_ping_pong",
            rpc_type="STREAMING",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
        )
        yield _ping_pong_scenario(
            "csharp_protobuf_async_unary_ping_pong",
            rpc_type="UNARY",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "csharp_protobuf_sync_to_async_unary_ping_pong",
            rpc_type="UNARY",
            client_type="SYNC_CLIENT",
            server_type="ASYNC_SERVER",
        )
        yield _ping_pong_scenario(
            "csharp_protobuf_async_unary_qps_unconstrained",
            rpc_type="UNARY",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            unconstrained_client="async",
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "csharp_protobuf_async_streaming_qps_unconstrained",
            rpc_type="STREAMING",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            unconstrained_client="async",
            categories=[SCALABLE],
        )
        # Cross-language scenarios pair the C# side with a C++ peer.
        yield _ping_pong_scenario(
            "csharp_to_cpp_protobuf_sync_unary_ping_pong",
            rpc_type="UNARY",
            client_type="SYNC_CLIENT",
            server_type="SYNC_SERVER",
            server_language="c++",
            async_server_threads=1,
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "csharp_to_cpp_protobuf_async_streaming_ping_pong",
            rpc_type="STREAMING",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            server_language="c++",
            async_server_threads=1,
        )
        yield _ping_pong_scenario(
            "csharp_to_cpp_protobuf_async_unary_qps_unconstrained",
            rpc_type="UNARY",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            unconstrained_client="async",
            server_language="c++",
            categories=[SCALABLE],
        )
        yield _ping_pong_scenario(
            "csharp_to_cpp_protobuf_sync_to_async_unary_qps_unconstrained",
            rpc_type="UNARY",
            client_type="SYNC_CLIENT",
            server_type="ASYNC_SERVER",
            unconstrained_client="sync",
            server_language="c++",
            categories=[SCALABLE],
        )
        yield _ping_pong_scenario(
            "cpp_to_csharp_protobuf_async_unary_qps_unconstrained",
            rpc_type="UNARY",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            unconstrained_client="async",
            client_language="c++",
            categories=[SCALABLE],
        )
        yield _ping_pong_scenario(
            "csharp_protobuf_async_unary_ping_pong_1MB",
            rpc_type="UNARY",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            req_size=1024 * 1024,
            resp_size=1024 * 1024,
            categories=[SMOKETEST, SCALABLE],
        )
    def __str__(self):
        return "csharp"
class DotnetLanguage(Language):
    """The pure C# implementation from grpc/grpc-dotnet."""
    def worker_cmdline(self):
        # grpc-dotnet worker is only supported by the new GKE based OSS benchmark
        # framework, and the worker_cmdline() is only used by run_performance_tests.py
        return ["grpc_dotnet_not_supported_by_legacy_performance_runner.sh"]
    def worker_port_offset(self):
        """Per-language port offset to avoid worker port collisions."""
        return 1100
    def scenarios(self):
        """Yields all grpc-dotnet scenario dicts (see _ping_pong_scenario)."""
        yield _ping_pong_scenario(
            "dotnet_generic_async_streaming_ping_pong",
            rpc_type="STREAMING",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_GENERIC_SERVER",
            use_generic_payload=True,
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "dotnet_generic_async_streaming_ping_pong_insecure_1MB",
            rpc_type="STREAMING",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_GENERIC_SERVER",
            req_size=1024 * 1024,
            resp_size=1024 * 1024,
            use_generic_payload=True,
            secure=False,
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "dotnet_generic_async_streaming_qps_unconstrained_insecure",
            rpc_type="STREAMING",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_GENERIC_SERVER",
            unconstrained_client="async",
            use_generic_payload=True,
            secure=False,
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "dotnet_protobuf_async_streaming_ping_pong",
            rpc_type="STREAMING",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
        )
        yield _ping_pong_scenario(
            "dotnet_protobuf_async_unary_ping_pong",
            rpc_type="UNARY",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "dotnet_protobuf_sync_to_async_unary_ping_pong",
            rpc_type="UNARY",
            client_type="SYNC_CLIENT",
            server_type="ASYNC_SERVER",
        )
        yield _ping_pong_scenario(
            "dotnet_protobuf_async_unary_qps_unconstrained",
            rpc_type="UNARY",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            unconstrained_client="async",
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "dotnet_protobuf_async_streaming_qps_unconstrained",
            rpc_type="STREAMING",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            unconstrained_client="async",
            categories=[SCALABLE],
        )
        # Cross-language scenarios pair the dotnet side with a C++ peer.
        yield _ping_pong_scenario(
            "dotnet_to_cpp_protobuf_sync_unary_ping_pong",
            rpc_type="UNARY",
            client_type="SYNC_CLIENT",
            server_type="SYNC_SERVER",
            server_language="c++",
            async_server_threads=1,
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "dotnet_to_cpp_protobuf_async_streaming_ping_pong",
            rpc_type="STREAMING",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            server_language="c++",
            async_server_threads=1,
        )
        yield _ping_pong_scenario(
            "dotnet_to_cpp_protobuf_async_unary_qps_unconstrained",
            rpc_type="UNARY",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            unconstrained_client="async",
            server_language="c++",
            categories=[SCALABLE],
        )
        yield _ping_pong_scenario(
            "dotnet_to_cpp_protobuf_sync_to_async_unary_qps_unconstrained",
            rpc_type="UNARY",
            client_type="SYNC_CLIENT",
            server_type="ASYNC_SERVER",
            unconstrained_client="sync",
            server_language="c++",
            categories=[SCALABLE],
        )
        yield _ping_pong_scenario(
            "cpp_to_dotnet_protobuf_async_unary_qps_unconstrained",
            rpc_type="UNARY",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            unconstrained_client="async",
            client_language="c++",
            categories=[SCALABLE],
        )
        yield _ping_pong_scenario(
            "dotnet_protobuf_async_unary_ping_pong_1MB",
            rpc_type="UNARY",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            req_size=1024 * 1024,
            resp_size=1024 * 1024,
            categories=[SMOKETEST, SCALABLE],
        )
    def __str__(self):
        return "dotnet"
class PythonLanguage(Language):
    """Scenario definitions for the (sync/threaded) Python workers."""
    def worker_cmdline(self):
        """Command line to start a Python QPS worker."""
        return ["tools/run_tests/performance/run_worker_python.sh"]
    def worker_port_offset(self):
        """Per-language port offset to avoid worker port collisions."""
        return 500
    def scenarios(self):
        """Yields all Python scenario dicts (see _ping_pong_scenario)."""
        yield _ping_pong_scenario(
            "python_protobuf_async_unary_5000rpcs_1KB_psm",
            rpc_type="UNARY",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
            req_size=1024,
            resp_size=1024,
            outstanding=5000,
            channels=1,
            num_clients=1,
            secure=False,
            async_server_threads=1,
            categories=[PSM],
        )
        yield _ping_pong_scenario(
            "python_generic_sync_streaming_ping_pong",
            rpc_type="STREAMING",
            client_type="SYNC_CLIENT",
            server_type="ASYNC_GENERIC_SERVER",
            use_generic_payload=True,
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "python_protobuf_sync_streaming_ping_pong",
            rpc_type="STREAMING",
            client_type="SYNC_CLIENT",
            server_type="ASYNC_SERVER",
        )
        yield _ping_pong_scenario(
            "python_protobuf_async_unary_ping_pong",
            rpc_type="UNARY",
            client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER",
        )
        yield _ping_pong_scenario(
            "python_protobuf_sync_unary_ping_pong",
            rpc_type="UNARY",
            client_type="SYNC_CLIENT",
            server_type="ASYNC_SERVER",
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "python_protobuf_sync_unary_qps_unconstrained",
            rpc_type="UNARY",
            client_type="SYNC_CLIENT",
            server_type="ASYNC_SERVER",
            unconstrained_client="sync",
        )
        yield _ping_pong_scenario(
            "python_protobuf_sync_streaming_qps_unconstrained",
            rpc_type="STREAMING",
            client_type="SYNC_CLIENT",
            server_type="ASYNC_SERVER",
            unconstrained_client="sync",
        )
        # Cross-language scenarios pair the Python side with a C++ server.
        yield _ping_pong_scenario(
            "python_to_cpp_protobuf_sync_unary_ping_pong",
            rpc_type="UNARY",
            client_type="SYNC_CLIENT",
            server_type="ASYNC_SERVER",
            server_language="c++",
            async_server_threads=0,
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "python_to_cpp_protobuf_sync_streaming_ping_pong",
            rpc_type="STREAMING",
            client_type="SYNC_CLIENT",
            server_type="ASYNC_SERVER",
            server_language="c++",
            async_server_threads=1,
        )
        yield _ping_pong_scenario(
            "python_protobuf_sync_unary_ping_pong_1MB",
            rpc_type="UNARY",
            client_type="SYNC_CLIENT",
            server_type="ASYNC_SERVER",
            req_size=1024 * 1024,
            resp_size=1024 * 1024,
            categories=[SMOKETEST, SCALABLE],
        )
    def __str__(self):
        return "python"
class PythonAsyncIOLanguage(Language):
    """Benchmark scenarios for the Python asyncio (grpc.aio) stack."""

    def worker_cmdline(self):
        # Shell wrapper that launches the asyncio benchmark worker.
        return ["tools/run_tests/performance/run_worker_python_asyncio.sh"]

    def worker_port_offset(self):
        # Port offset keeps asyncio workers off other languages' worker ports.
        return 1200

    def scenarios(self):
        """Yield benchmark scenario definitions for python_asyncio."""
        yield _ping_pong_scenario(
            "python_asyncio_protobuf_async_unary_5000rpcs_1KB_psm",
            rpc_type="UNARY", client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER", req_size=1024, resp_size=1024,
            outstanding=5000, channels=1, num_clients=1, secure=False,
            async_server_threads=1, categories=[PSM],
        )
        # Sweep outstanding-RPC counts and channel counts; the "_max"
        # variants use as many processes as cores (client/server_processes=0).
        for outstanding in [64, 128, 256, 512]:
            for channels in [1, 4]:
                yield _ping_pong_scenario(
                    "python_asyncio_protobuf_async_unary_ping_pong_%dx%d_max"
                    % (outstanding, channels,),
                    rpc_type="UNARY", client_type="ASYNC_CLIENT",
                    server_type="ASYNC_SERVER",
                    outstanding=outstanding * channels, channels=channels,
                    client_processes=0, server_processes=0,
                    unconstrained_client="async", categories=[SCALABLE],
                )
            yield _ping_pong_scenario(
                "python_asyncio_protobuf_async_unary_ping_pong_%d_1thread"
                % outstanding,
                rpc_type="UNARY", client_type="ASYNC_CLIENT",
                server_type="ASYNC_SERVER", outstanding=outstanding,
                channels=1, client_processes=1, server_processes=1,
                unconstrained_client="async", categories=[SCALABLE],
            )
        yield _ping_pong_scenario(
            "python_asyncio_generic_async_streaming_ping_pong",
            rpc_type="STREAMING", client_type="ASYNC_CLIENT",
            server_type="ASYNC_GENERIC_SERVER", channels=1,
            client_processes=1, server_processes=1, use_generic_payload=True,
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "python_asyncio_protobuf_async_streaming_ping_pong",
            rpc_type="STREAMING", client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER", channels=1, client_processes=1,
            server_processes=1,
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "python_asyncio_protobuf_async_unary_ping_pong",
            rpc_type="UNARY", client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER", client_processes=1,
            server_processes=1,
            categories=[SMOKETEST, SCALABLE],
        )
        # NOTE(review): this scenario reuses the exact name
        # "python_asyncio_protobuf_async_unary_ping_pong" of the previous
        # scenario (it differs only by channels=1), so results keyed by
        # scenario name may collide -- confirm whether one should be renamed.
        yield _ping_pong_scenario(
            "python_asyncio_protobuf_async_unary_ping_pong",
            rpc_type="UNARY", client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER", channels=1, client_processes=1,
            server_processes=1,
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "python_asyncio_protobuf_async_unary_qps_unconstrained",
            rpc_type="UNARY", client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER", channels=1,
            unconstrained_client="async",
        )
        yield _ping_pong_scenario(
            "python_asyncio_protobuf_async_streaming_qps_unconstrained",
            rpc_type="STREAMING", client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER", channels=1,
            unconstrained_client="async",
        )
        # Cross-language: asyncio client against a C++ server.
        yield _ping_pong_scenario(
            "python_asyncio_to_cpp_protobuf_async_unary_ping_pong_1thread",
            rpc_type="UNARY", client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER", server_language="c++", channels=1,
            client_processes=1, unconstrained_client="async",
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "python_asyncio_to_cpp_protobuf_async_unary_ping_pong_max",
            rpc_type="UNARY", client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER", unconstrained_client="async",
            channels=1, client_processes=0, server_language="c++",
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "python_asyncio_to_cpp_protobuf_sync_streaming_ping_pong_1thread",
            rpc_type="STREAMING", client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER", channels=1, client_processes=1,
            server_processes=1, unconstrained_client="async",
            server_language="c++",
        )
        # Large-message (1 MiB request/response) variant.
        yield _ping_pong_scenario(
            "python_asyncio_protobuf_async_unary_ping_pong_1MB",
            rpc_type="UNARY", client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER", req_size=1024 * 1024,
            resp_size=1024 * 1024, channels=1, client_processes=1,
            server_processes=1,
            categories=[SMOKETEST, SCALABLE],
        )

    def __str__(self):
        """Short label identifying this language in scenario names."""
        return "python_asyncio"
class RubyLanguage(Language):
    """Benchmark scenarios for the Ruby stack."""

    def worker_cmdline(self):
        # Shell wrapper that launches the Ruby benchmark worker.
        return ["tools/run_tests/performance/run_worker_ruby.sh"]

    def worker_port_offset(self):
        # Port offset keeps Ruby workers off other languages' worker ports.
        return 300

    def scenarios(self):
        """Yield benchmark scenario definitions for Ruby."""
        yield _ping_pong_scenario(
            "ruby_protobuf_sync_streaming_ping_pong",
            rpc_type="STREAMING", client_type="SYNC_CLIENT",
            server_type="SYNC_SERVER",
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "ruby_protobuf_unary_ping_pong",
            rpc_type="UNARY", client_type="SYNC_CLIENT",
            server_type="SYNC_SERVER",
            categories=[SMOKETEST, SCALABLE],
        )
        yield _ping_pong_scenario(
            "ruby_protobuf_sync_unary_qps_unconstrained",
            rpc_type="UNARY", client_type="SYNC_CLIENT",
            server_type="SYNC_SERVER", unconstrained_client="sync",
        )
        yield _ping_pong_scenario(
            "ruby_protobuf_sync_streaming_qps_unconstrained",
            rpc_type="STREAMING", client_type="SYNC_CLIENT",
            server_type="SYNC_SERVER", unconstrained_client="sync",
        )
        # Cross-language: Ruby client against a C++ server.
        yield _ping_pong_scenario(
            "ruby_to_cpp_protobuf_sync_unary_ping_pong",
            rpc_type="UNARY", client_type="SYNC_CLIENT",
            server_type="SYNC_SERVER", server_language="c++",
            async_server_threads=1,
        )
        yield _ping_pong_scenario(
            "ruby_to_cpp_protobuf_sync_streaming_ping_pong",
            rpc_type="STREAMING", client_type="SYNC_CLIENT",
            server_type="SYNC_SERVER", server_language="c++",
            async_server_threads=1,
        )
        # Large-message (1 MiB request/response) variant.
        yield _ping_pong_scenario(
            "ruby_protobuf_unary_ping_pong_1MB",
            rpc_type="UNARY", client_type="SYNC_CLIENT",
            server_type="SYNC_SERVER", req_size=1024 * 1024,
            resp_size=1024 * 1024,
            categories=[SMOKETEST, SCALABLE],
        )

    def __str__(self):
        """Short label identifying this language in scenario names."""
        return "ruby"
class Php7Language(Language):
    """Benchmark scenarios for PHP 7, in either protobuf extension mode.

    The same class covers both the pure-PHP protobuf extension and the
    C protobuf extension; `php7_protobuf_c` selects which one.
    """

    def __init__(self, php7_protobuf_c=False):
        super().__init__()
        # True -> benchmark the protobuf C extension instead of the PHP one.
        self.php7_protobuf_c = php7_protobuf_c

    def worker_cmdline(self):
        # The worker script takes a flag to switch to the C extension.
        if self.php7_protobuf_c:
            return [
                "tools/run_tests/performance/run_worker_php.sh",
                "--use_protobuf_c_extension",
            ]
        return ["tools/run_tests/performance/run_worker_php.sh"]

    def worker_port_offset(self):
        # Distinct offsets so the two extension modes can run side by side.
        if self.php7_protobuf_c:
            return 900
        return 800

    def scenarios(self):
        """Yield benchmark scenario definitions for PHP 7.

        PHP has no benchmark server, so every scenario runs against a
        C++ server.
        """
        php7_extension_mode = "php7_protobuf_php_extension"
        if self.php7_protobuf_c:
            php7_extension_mode = "php7_protobuf_c_extension"
        yield _ping_pong_scenario(
            "%s_to_cpp_protobuf_async_unary_5000rpcs_1KB_psm"
            % php7_extension_mode,
            rpc_type="UNARY", client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER", server_language="c++",
            req_size=1024, resp_size=1024, outstanding=5000, channels=1,
            num_clients=1, secure=False, async_server_threads=1,
            categories=[PSM],
        )
        yield _ping_pong_scenario(
            "%s_to_cpp_protobuf_sync_unary_ping_pong" % php7_extension_mode,
            rpc_type="UNARY", client_type="SYNC_CLIENT",
            server_type="SYNC_SERVER", server_language="c++",
            async_server_threads=1,
        )
        yield _ping_pong_scenario(
            "%s_to_cpp_protobuf_sync_streaming_ping_pong" % php7_extension_mode,
            rpc_type="STREAMING", client_type="SYNC_CLIENT",
            server_type="SYNC_SERVER", server_language="c++",
            async_server_threads=1,
        )
        # TODO(ddyihai): Investigate why when async_server_threads=1/CPU usage 340%, the QPS performs
        # better than async_server_threads=0/CPU usage 490%.
        yield _ping_pong_scenario(
            "%s_to_cpp_protobuf_sync_unary_qps_unconstrained"
            % php7_extension_mode,
            rpc_type="UNARY", client_type="SYNC_CLIENT",
            server_type="ASYNC_SERVER", server_language="c++",
            outstanding=1, async_server_threads=1,
            unconstrained_client="sync",
        )
        yield _ping_pong_scenario(
            "%s_to_cpp_protobuf_sync_streaming_qps_unconstrained"
            % php7_extension_mode,
            rpc_type="STREAMING", client_type="SYNC_CLIENT",
            server_type="ASYNC_SERVER", server_language="c++",
            outstanding=1, async_server_threads=1,
            unconstrained_client="sync",
        )

    def __str__(self):
        """Short label identifying this language/extension-mode combination."""
        if self.php7_protobuf_c:
            return "php7_protobuf_c"
        return "php7"
class JavaLanguage(Language):
    """Benchmark scenarios for the Java stack.

    All scenarios use an extended warmup (JAVA_WARMUP_SECONDS) to give the
    JIT time to stabilize before measurement.
    """

    def worker_cmdline(self):
        # Shell wrapper that launches the Java benchmark worker.
        return ["tools/run_tests/performance/run_worker_java.sh"]

    def worker_port_offset(self):
        # Port offset keeps Java workers off other languages' worker ports.
        return 400

    def scenarios(self):
        """Yield benchmark scenario definitions for Java."""
        yield _ping_pong_scenario(
            "java_protobuf_async_unary_5000rpcs_1KB_psm",
            rpc_type="UNARY", client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER", req_size=1024, resp_size=1024,
            outstanding=5000, channels=1, num_clients=1, secure=False,
            async_server_threads=1, warmup_seconds=JAVA_WARMUP_SECONDS,
            categories=[PSM],
        )
        # Each remaining scenario is produced in a secure and an insecure
        # variant; only the secure ones join the smoke-test suite.
        for secure in [True, False]:
            secstr = "secure" if secure else "insecure"
            smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
            yield _ping_pong_scenario(
                "java_generic_async_streaming_ping_pong_%s" % secstr,
                rpc_type="STREAMING", client_type="ASYNC_CLIENT",
                server_type="ASYNC_GENERIC_SERVER", use_generic_payload=True,
                async_server_threads=1, secure=secure,
                warmup_seconds=JAVA_WARMUP_SECONDS,
                categories=smoketest_categories,
            )
            yield _ping_pong_scenario(
                "java_protobuf_async_streaming_ping_pong_%s" % secstr,
                rpc_type="STREAMING", client_type="ASYNC_CLIENT",
                server_type="ASYNC_SERVER", async_server_threads=1,
                secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
            )
            yield _ping_pong_scenario(
                "java_protobuf_async_unary_ping_pong_%s" % secstr,
                rpc_type="UNARY", client_type="ASYNC_CLIENT",
                server_type="ASYNC_SERVER", async_server_threads=1,
                secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
                categories=smoketest_categories,
            )
            yield _ping_pong_scenario(
                "java_protobuf_unary_ping_pong_%s" % secstr,
                rpc_type="UNARY", client_type="SYNC_CLIENT",
                server_type="SYNC_SERVER", async_server_threads=1,
                secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
            )
            yield _ping_pong_scenario(
                "java_protobuf_async_unary_qps_unconstrained_%s" % secstr,
                rpc_type="UNARY", client_type="ASYNC_CLIENT",
                server_type="ASYNC_SERVER", unconstrained_client="async",
                secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
                categories=smoketest_categories + [SCALABLE],
            )
            yield _ping_pong_scenario(
                "java_protobuf_async_streaming_qps_unconstrained_%s" % secstr,
                rpc_type="STREAMING", client_type="ASYNC_CLIENT",
                server_type="ASYNC_SERVER", unconstrained_client="async",
                secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
                categories=[SCALABLE],
            )
            yield _ping_pong_scenario(
                "java_generic_async_streaming_qps_unconstrained_%s" % secstr,
                rpc_type="STREAMING", client_type="ASYNC_CLIENT",
                server_type="ASYNC_GENERIC_SERVER",
                unconstrained_client="async", use_generic_payload=True,
                secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
                categories=[SCALABLE],
            )
            yield _ping_pong_scenario(
                "java_generic_async_streaming_qps_one_server_core_%s" % secstr,
                rpc_type="STREAMING", client_type="ASYNC_CLIENT",
                server_type="ASYNC_GENERIC_SERVER",
                unconstrained_client="async-limited", use_generic_payload=True,
                async_server_threads=1, secure=secure,
                warmup_seconds=JAVA_WARMUP_SECONDS,
            )
        # TODO(jtattermusch): add scenarios java vs C++

    def __str__(self):
        """Short label identifying this language in scenario names."""
        return "java"
class GoLanguage(Language):
    """Benchmark scenarios for the Go stack."""

    def worker_cmdline(self):
        # Shell wrapper that launches the Go benchmark worker.
        return ["tools/run_tests/performance/run_worker_go.sh"]

    def worker_port_offset(self):
        # Port offset keeps Go workers off other languages' worker ports.
        return 600

    def scenarios(self):
        """Yield benchmark scenario definitions for Go."""
        yield _ping_pong_scenario(
            "go_protobuf_async_unary_5000rpcs_1KB_psm",
            rpc_type="UNARY", client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER", req_size=1024, resp_size=1024,
            outstanding=5000, channels=1, num_clients=1, secure=False,
            async_server_threads=1, categories=[PSM],
        )
        # Each remaining scenario is produced in a secure and an insecure
        # variant; only the secure ones join the smoke-test suite.
        for secure in [True, False]:
            secstr = "secure" if secure else "insecure"
            smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
            # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
            # but that's mostly because of lack of better name of the enum value.
            yield _ping_pong_scenario(
                "go_generic_sync_streaming_ping_pong_%s" % secstr,
                rpc_type="STREAMING", client_type="SYNC_CLIENT",
                server_type="ASYNC_GENERIC_SERVER", use_generic_payload=True,
                async_server_threads=1, secure=secure,
                categories=smoketest_categories,
            )
            yield _ping_pong_scenario(
                "go_protobuf_sync_streaming_ping_pong_%s" % secstr,
                rpc_type="STREAMING", client_type="SYNC_CLIENT",
                server_type="SYNC_SERVER", async_server_threads=1,
                secure=secure,
            )
            yield _ping_pong_scenario(
                "go_protobuf_sync_unary_ping_pong_%s" % secstr,
                rpc_type="UNARY", client_type="SYNC_CLIENT",
                server_type="SYNC_SERVER", async_server_threads=1,
                secure=secure,
                categories=smoketest_categories,
            )
            # unconstrained_client='async' is intended (client uses goroutines)
            yield _ping_pong_scenario(
                "go_protobuf_sync_unary_qps_unconstrained_%s" % secstr,
                rpc_type="UNARY", client_type="SYNC_CLIENT",
                server_type="SYNC_SERVER", unconstrained_client="async",
                secure=secure,
                categories=smoketest_categories + [SCALABLE],
            )
            # unconstrained_client='async' is intended (client uses goroutines)
            yield _ping_pong_scenario(
                "go_protobuf_sync_streaming_qps_unconstrained_%s" % secstr,
                rpc_type="STREAMING", client_type="SYNC_CLIENT",
                server_type="SYNC_SERVER", unconstrained_client="async",
                secure=secure,
                categories=[SCALABLE],
            )
            # unconstrained_client='async' is intended (client uses goroutines)
            # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
            # but that's mostly because of lack of better name of the enum value.
            yield _ping_pong_scenario(
                "go_generic_sync_streaming_qps_unconstrained_%s" % secstr,
                rpc_type="STREAMING", client_type="SYNC_CLIENT",
                server_type="ASYNC_GENERIC_SERVER",
                unconstrained_client="async", use_generic_payload=True,
                secure=secure,
                categories=[SCALABLE],
            )
        # TODO(jtattermusch): add scenarios go vs C++

    def __str__(self):
        """Short label identifying this language in scenario names."""
        return "go"
class NodeLanguage(Language):
    """Benchmark scenarios for Node.js, native or pure-JS binding.

    `node_purejs` selects the pure-JavaScript implementation; otherwise the
    native binding is benchmarked. All scenarios run against a Node server.
    """

    def __init__(self, node_purejs=False):
        super().__init__()
        # True -> benchmark the pure-JS implementation instead of the native one.
        self.node_purejs = node_purejs

    def worker_cmdline(self):
        # The worker script takes a fixture argument selecting the binding.
        fixture = "native_js" if self.node_purejs else "native_native"
        return [
            "tools/run_tests/performance/run_worker_node.sh",
            fixture,
            "--benchmark_impl=grpc",
        ]

    def worker_port_offset(self):
        # Distinct offsets so both Node variants can run side by side.
        if self.node_purejs:
            return 1100
        return 1000

    def scenarios(self):
        """Yield benchmark scenario definitions for Node.js."""
        node_implementation = "node_purejs" if self.node_purejs else "node"
        yield _ping_pong_scenario(
            "%s_to_node_protobuf_async_unary_5000rpcs_1KB_psm"
            % (node_implementation),
            rpc_type="UNARY", client_type="ASYNC_CLIENT",
            server_type="ASYNC_SERVER", server_language="node",
            req_size=1024, resp_size=1024, outstanding=5000, channels=1,
            num_clients=1, secure=False, async_server_threads=1,
            categories=[PSM],
        )
        # Each remaining scenario is produced in a secure and an insecure
        # variant; only the secure ones join the smoke-test suite.
        for secure in [True, False]:
            secstr = "secure" if secure else "insecure"
            smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
            yield _ping_pong_scenario(
                "%s_to_node_generic_async_streaming_ping_pong_%s"
                % (node_implementation, secstr),
                rpc_type="STREAMING", client_type="ASYNC_CLIENT",
                server_type="ASYNC_GENERIC_SERVER", server_language="node",
                use_generic_payload=True, async_server_threads=1,
                secure=secure,
                categories=smoketest_categories,
            )
            yield _ping_pong_scenario(
                "%s_to_node_protobuf_async_streaming_ping_pong_%s"
                % (node_implementation, secstr),
                rpc_type="STREAMING", client_type="ASYNC_CLIENT",
                server_type="ASYNC_SERVER", server_language="node",
                async_server_threads=1, secure=secure,
            )
            yield _ping_pong_scenario(
                "%s_to_node_protobuf_async_unary_ping_pong_%s"
                % (node_implementation, secstr),
                rpc_type="UNARY", client_type="ASYNC_CLIENT",
                server_type="ASYNC_SERVER", server_language="node",
                async_server_threads=1, secure=secure,
                categories=smoketest_categories,
            )
            yield _ping_pong_scenario(
                "%s_to_node_protobuf_async_unary_qps_unconstrained_%s"
                % (node_implementation, secstr),
                rpc_type="UNARY", client_type="ASYNC_CLIENT",
                server_type="ASYNC_SERVER", server_language="node",
                unconstrained_client="async", secure=secure,
                categories=smoketest_categories + [SCALABLE],
            )
            yield _ping_pong_scenario(
                "%s_to_node_protobuf_async_streaming_qps_unconstrained_%s"
                % (node_implementation, secstr),
                rpc_type="STREAMING", client_type="ASYNC_CLIENT",
                server_type="ASYNC_SERVER", server_language="node",
                unconstrained_client="async", secure=secure,
                categories=[SCALABLE],
            )
            yield _ping_pong_scenario(
                "%s_to_node_generic_async_streaming_qps_unconstrained_%s"
                % (node_implementation, secstr),
                rpc_type="STREAMING", client_type="ASYNC_CLIENT",
                server_type="ASYNC_GENERIC_SERVER", server_language="node",
                unconstrained_client="async", use_generic_payload=True,
                secure=secure,
                categories=[SCALABLE],
            )
        # TODO(murgatroid99): add scenarios node vs C++

    def __str__(self):
        """Short label identifying this language/binding combination."""
        if self.node_purejs:
            return "node_purejs"
        return "node"
# Registry mapping the --language command-line value to the Language
# implementation that supplies its worker command line and scenarios.
LANGUAGES = {
    "c++": CXXLanguage(),
    "csharp": CSharpLanguage(),
    "dotnet": DotnetLanguage(),
    "ruby": RubyLanguage(),
    "php7": Php7Language(),
    "php7_protobuf_c": Php7Language(php7_protobuf_c=True),
    "java": JavaLanguage(),
    "python": PythonLanguage(),
    "python_asyncio": PythonAsyncIOLanguage(),
    "go": GoLanguage(),
    "node": NodeLanguage(),
    "node_purejs": NodeLanguage(node_purejs=True),
}
| 61,704
| 34.099545
| 126
|
py
|
grpc
|
grpc-master/tools/run_tests/python_utils/port_server.py
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manage TCP ports for unit tests; started by run_tests.py"""
from __future__ import print_function
import argparse
import hashlib
import os
import platform
import random
import socket
import sys
import threading
import time
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler
from six.moves.BaseHTTPServer import HTTPServer
from six.moves.socketserver import ThreadingMixIn
# increment this number whenever making a change to ensure that
# the changes are picked up by running CI servers
# note that all changes must be backwards compatible
_MY_VERSION = 21

# "dump_version" mode: print the protocol version and exit immediately.
# start_port_server.py uses this to decide whether a running server is stale.
if len(sys.argv) == 2 and sys.argv[1] == "dump_version":
    print(_MY_VERSION)
    sys.exit(0)
argp = argparse.ArgumentParser(description="Server for httpcli_test")
argp.add_argument("-p", "--port", default=12345, type=int)
argp.add_argument("-l", "--logfile", default=None, type=str)
args = argp.parse_args()

if args.logfile is not None:
    # Detach from the launching terminal: close the std streams and redirect
    # all further output into the logfile.
    sys.stdin.close()
    sys.stderr.close()
    sys.stdout.close()
    sys.stderr = open(args.logfile, "w")
    sys.stdout = sys.stderr

print("port server running on port %d" % args.port)

# Shared state guarded by `mu`:
#   pool   -- ports verified free and ready to hand out
#   in_use -- port -> timestamp of when it was leased via /get
pool = []
in_use = {}
mu = threading.Lock()
# Cronet restricts the following ports to be used (see
# https://cs.chromium.org/chromium/src/net/base/port_util.cc). When one of these
# ports is used in a Cronet test, the test would fail (see issue #12149). These
# ports must be excluded from pool.
cronet_restricted_ports = [
    1, 7, 9, 11, 13, 15, 17, 19, 20, 21, 22, 23, 25, 37, 42, 43, 53, 77,
    79, 87, 95, 101, 102, 103, 104, 109, 110, 111, 113, 115, 117, 119,
    123, 135, 139, 143, 179, 389, 465, 512, 513, 514, 515, 526, 530, 531,
    532, 540, 556, 563, 587, 601, 636, 993, 995, 2049, 3659, 4045, 6000,
    6665, 6666, 6667, 6668, 6669, 6697,
]
def can_connect(port):
    """Return True if something is accepting TCP connections on localhost:port."""
    # This probe is only really useful on unices where SO_REUSE_PORT is
    # available; on Windows, where it is expensive, skip it entirely.
    if platform.system() == "Windows":
        return False
    probe = socket.socket()
    try:
        probe.connect(("localhost", port))
    except socket.error:
        return False
    else:
        return True
    finally:
        probe.close()
def can_bind(port, proto):
    """Return True if a TCP socket of address family *proto* can bind the port."""
    candidate = socket.socket(proto, socket.SOCK_STREAM)
    # SO_REUSEADDR lets us re-bind ports stuck in TIME_WAIT.
    candidate.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        candidate.bind(("localhost", port))
    except socket.error:
        return False
    else:
        return True
    finally:
        candidate.close()
def refill_pool(max_timeout, req):
    """Scan for ports not marked for being in use"""
    candidates = [
        p for p in range(1025, 32766) if p not in cronet_restricted_ports
    ]
    # Randomize so repeated refills don't keep hammering the same range.
    random.shuffle(candidates)
    for candidate in candidates:
        if len(pool) > 100:
            break
        if candidate in in_use:
            # Reclaim leases older than max_timeout; skip fresher ones.
            age = time.time() - in_use[candidate]
            if age < max_timeout:
                continue
            req.log_message("kill old request %d" % candidate)
            del in_use[candidate]
        if (
            can_bind(candidate, socket.AF_INET)
            and can_bind(candidate, socket.AF_INET6)
            and not can_connect(candidate)
        ):
            req.log_message("found available port %d" % candidate)
            pool.append(candidate)
def allocate_port(req):
    """Take one free port from the pool, refilling the pool when it is empty."""
    global pool
    global in_use
    global mu
    mu.acquire()
    max_timeout = 600
    while not pool:
        refill_pool(max_timeout, req)
        if not pool:
            req.log_message("failed to find ports: retrying soon")
            # Drop the lock while sleeping so /drop requests can return ports.
            mu.release()
            time.sleep(1)
            mu.acquire()
            # Reclaim progressively younger leases on each failed attempt.
            max_timeout /= 2
    port = pool.pop(0)
    in_use[port] = time.time()
    mu.release()
    return port
# NOTE(review): keep_running is declared `global` in do_GET but never read or
# written anywhere visible -- looks like a leftover from an older shutdown
# mechanism; confirm before removing.
keep_running = True


class Handler(BaseHTTPRequestHandler):
    """HTTP handler implementing the port-server protocol.

    Endpoints:
      /get            -> allocate and return one free port
      /drop/<port>    -> return a previously allocated port to the pool
      /version_number -> report _MY_VERSION (used for upgrade detection)
      /dump           -> YAML dump of pool/in_use state (debugging only)
      /quitquitquit   -> shut the server down
    """

    def setup(self):
        # If the client is unreachable for 5 seconds, close the connection
        self.timeout = 5
        BaseHTTPRequestHandler.setup(self)

    def do_GET(self):
        global keep_running
        global mu
        if self.path == "/get":
            # allocate a new port, it will stay bound for ten minutes and until
            # it's unused
            self.send_response(200)
            self.send_header("Content-Type", "text/plain")
            self.end_headers()
            p = allocate_port(self)
            self.log_message("allocated port %d" % p)
            self.wfile.write(str(p).encode("ascii"))
        elif self.path[0:6] == "/drop/":
            self.send_response(200)
            self.send_header("Content-Type", "text/plain")
            self.end_headers()
            p = int(self.path[6:])
            mu.acquire()
            if p in in_use:
                # Known lease: recycle the port back into the pool.
                del in_use[p]
                pool.append(p)
                k = "known"
            else:
                k = "unknown"
            mu.release()
            self.log_message("drop %s port %d" % (k, p))
        elif self.path == "/version_number":
            # fetch a version string and the current process pid
            self.send_response(200)
            self.send_header("Content-Type", "text/plain")
            self.end_headers()
            self.wfile.write(str(_MY_VERSION).encode("ascii"))
        elif self.path == "/dump":
            # yaml module is not installed on Macs and Windows machines by default
            # so we import it lazily (/dump action is only used for debugging)
            import yaml

            self.send_response(200)
            self.send_header("Content-Type", "text/plain")
            self.end_headers()
            mu.acquire()
            now = time.time()
            # Report in_use as port -> lease age in seconds.
            out = yaml.dump(
                {
                    "pool": pool,
                    "in_use": dict(
                        (k, now - v) for k, v in list(in_use.items())
                    ),
                }
            )
            mu.release()
            self.wfile.write(out.encode("ascii"))
        elif self.path == "/quitquitquit":
            self.send_response(200)
            self.end_headers()
            self.server.shutdown()
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Handle requests in a separate thread"""


# Serve until a /quitquitquit request calls shutdown() on the server.
ThreadedHTTPServer(("", args.port), Handler).serve_forever()
| 6,993
| 24.0681
| 82
|
py
|
grpc
|
grpc-master/tools/run_tests/python_utils/start_port_server.py
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import os
import socket
import subprocess
import sys
import tempfile
import time
import six.moves.urllib.request as request
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import jobset
# must be synchronized with test/core/util/port_server_client.h
_PORT_SERVER_PORT = 32766
def start_port_server():
    """Ensure a compatible port server is running on _PORT_SERVER_PORT.

    Probes any already-running server, replaces it if its protocol version
    is older than ours, and otherwise leaves it alone. Exits the process
    (sys.exit(1)) if a fresh server cannot be brought up.
    """
    # check if a compatible port server is running
    # if incompatible (version mismatch) ==> start a new one
    # if not running ==> start a new one
    # otherwise, leave it up
    try:
        version = int(
            request.urlopen(
                "http://localhost:%d/version_number" % _PORT_SERVER_PORT
            ).read()
        )
        logging.info("detected port server running version %d", version)
        running = True
    except Exception as e:
        # Any failure (connection refused, bad response) means "not running".
        logging.exception("failed to detect port server")
        running = False
    if running:
        # Compare the running server's version against our own copy's.
        current_version = int(
            subprocess.check_output(
                [
                    sys.executable,  # use the same python binary as this process
                    os.path.abspath(
                        "tools/run_tests/python_utils/port_server.py"
                    ),
                    "dump_version",
                ]
            ).decode()
        )
        logging.info("my port server is version %d", current_version)
        running = version >= current_version
        if not running:
            # Ask the stale server to shut itself down before restarting.
            logging.info("port_server version mismatch: killing the old one")
            request.urlopen(
                "http://localhost:%d/quitquitquit" % _PORT_SERVER_PORT
            ).read()
            time.sleep(1)
    if not running:
        fd, logfile = tempfile.mkstemp()
        os.close(fd)
        logging.info("starting port_server, with log file %s", logfile)
        args = [
            sys.executable,
            os.path.abspath("tools/run_tests/python_utils/port_server.py"),
            "-p",
            "%d" % _PORT_SERVER_PORT,
            "-l",
            logfile,
        ]
        env = dict(os.environ)
        env["BUILD_ID"] = "pleaseDontKillMeJenkins"
        if jobset.platform_string() == "windows":
            # Working directory of port server needs to be outside of Jenkins
            # workspace to prevent file lock issues.
            tempdir = tempfile.mkdtemp()
            if sys.version_info.major == 2:
                creationflags = 0x00000008  # detached process
            else:
                creationflags = (
                    0  # DETACHED_PROCESS doesn't seem to work with python3
                )
            port_server = subprocess.Popen(
                args,
                env=env,
                cwd=tempdir,
                creationflags=creationflags,
                close_fds=True,
            )
        else:
            # Detach via a new session so the server outlives this process.
            port_server = subprocess.Popen(
                args, env=env, preexec_fn=os.setsid, close_fds=True
            )
        time.sleep(1)
        # ensure port server is up
        waits = 0
        while True:
            if waits > 10:
                # Give up on this child; the poll() check below will notice
                # the kill and fall into the last-ditch retry path.
                logging.warning(
                    "killing port server due to excessive start up waits"
                )
                port_server.kill()
            if port_server.poll() is not None:
                logging.error("port_server failed to start")
                # try one final time: maybe another build managed to start one
                time.sleep(1)
                try:
                    request.urlopen(
                        "http://localhost:%d/get" % _PORT_SERVER_PORT
                    ).read()
                    logging.info(
                        "last ditch attempt to contact port server succeeded"
                    )
                    break
                except:
                    logging.exception(
                        "final attempt to contact port server failed"
                    )
                    port_log = open(logfile, "r").read()
                    print(port_log)
                    sys.exit(1)
            try:
                port_server_url = "http://localhost:%d/get" % _PORT_SERVER_PORT
                request.urlopen(port_server_url).read()
                logging.info("port server is up and ready")
                break
            except socket.timeout:
                logging.exception("while waiting for port_server")
                time.sleep(1)
                waits += 1
            except IOError:
                logging.exception("while waiting for port_server")
                time.sleep(1)
                waits += 1
            except:
                # Unexpected error: kill the child and propagate.
                logging.exception(
                    (
                        'error while contacting port server at "%s".'
                        "Will try killing it."
                    ),
                    port_server_url,
                )
                port_server.kill()
                raise
| 5,527
| 34.435897
| 81
|
py
|
grpc
|
grpc-master/tools/run_tests/python_utils/filter_pull_request_tests.py
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filter out tests based on file differences compared to merge target branch"""
from __future__ import print_function
import re
import subprocess
import six
class TestSuite:
    """
    Contains labels to identify jobs as belonging to this test suite and
    triggers to identify if changed files are relevant.
    """

    def __init__(self, labels: list) -> None:
        """
        Build TestSuite to group tests based on labeling.

        :param labels: strings that should match a job's platform, config,
            language, or test group
        """
        # Fixed docstring: it previously documented a ``label`` parameter
        # that does not exist ("a jobs's" typo as well).
        self.triggers: list = []
        self.labels = labels

    def add_trigger(self, trigger: str) -> None:
        """
        Add a regex to list of triggers that determine if a changed file
        should run tests.

        :param trigger: regex matching file relevant to tests
        """
        self.triggers.append(trigger)
# Create test suites
_CORE_TEST_SUITE = TestSuite(["c"])
_CPP_TEST_SUITE = TestSuite(["c++"])
_CSHARP_TEST_SUITE = TestSuite(["csharp"])
_NODE_TEST_SUITE = TestSuite(["grpc-node"])
_OBJC_TEST_SUITE = TestSuite(["objc"])
_PHP_TEST_SUITE = TestSuite(["php", "php7"])
_PYTHON_TEST_SUITE = TestSuite(["python"])
_RUBY_TEST_SUITE = TestSuite(["ruby"])
_LINUX_TEST_SUITE = TestSuite(["linux"])
_WINDOWS_TEST_SUITE = TestSuite(["windows"])
_MACOS_TEST_SUITE = TestSuite(["macos"])
_ALL_TEST_SUITES = [
    _CORE_TEST_SUITE,
    _CPP_TEST_SUITE,
    _CSHARP_TEST_SUITE,
    _NODE_TEST_SUITE,
    _OBJC_TEST_SUITE,
    _PHP_TEST_SUITE,
    _PYTHON_TEST_SUITE,
    _RUBY_TEST_SUITE,
    _LINUX_TEST_SUITE,
    _WINDOWS_TEST_SUITE,
    _MACOS_TEST_SUITE,
]

# Dictionary of allowlistable files where the key is a regex matching changed files
# and the value is a list of tests that should be run. An empty list means that
# the changed files should not trigger any tests. Any changed file that does not
# match any of these regexes will trigger all tests
# DO NOT CHANGE THIS UNLESS YOU KNOW WHAT YOU ARE DOING (be careful even if you do)
_ALLOWLIST_DICT = {
    "^doc/": [],
    "^examples/": [],
    "^include/grpc\+\+/": [_CPP_TEST_SUITE],
    "^include/grpcpp/": [_CPP_TEST_SUITE],
    "^summerofcode/": [],
    "^src/cpp/": [_CPP_TEST_SUITE],
    "^src/csharp/": [_CSHARP_TEST_SUITE],
    "^src/objective\-c/": [_OBJC_TEST_SUITE],
    "^src/php/": [_PHP_TEST_SUITE],
    "^src/python/": [_PYTHON_TEST_SUITE],
    "^src/ruby/": [_RUBY_TEST_SUITE],
    "^templates/": [],
    "^test/core/": [_CORE_TEST_SUITE, _CPP_TEST_SUITE],
    "^test/cpp/": [_CPP_TEST_SUITE],
    "^test/distrib/cpp/": [_CPP_TEST_SUITE],
    "^test/distrib/csharp/": [_CSHARP_TEST_SUITE],
    "^test/distrib/php/": [_PHP_TEST_SUITE],
    "^test/distrib/python/": [_PYTHON_TEST_SUITE],
    "^test/distrib/ruby/": [_RUBY_TEST_SUITE],
    "^tools/run_tests/xds_k8s_test_driver/": [],
    "^tools/internal_ci/linux/grpc_xds_k8s.*": [],
    "^vsprojects/": [_WINDOWS_TEST_SUITE],
    "composer\.json$": [_PHP_TEST_SUITE],
    "config\.m4$": [_PHP_TEST_SUITE],
    "CONTRIBUTING\.md$": [],
    "Gemfile$": [_RUBY_TEST_SUITE],
    "grpc\.def$": [_WINDOWS_TEST_SUITE],
    "grpc\.gemspec$": [_RUBY_TEST_SUITE],
    "gRPC\.podspec$": [_OBJC_TEST_SUITE],
    "gRPC\-Core\.podspec$": [_OBJC_TEST_SUITE],
    "gRPC\-ProtoRPC\.podspec$": [_OBJC_TEST_SUITE],
    "gRPC\-RxLibrary\.podspec$": [_OBJC_TEST_SUITE],
    "BUILDING\.md$": [],
    "LICENSE$": [],
    "MANIFEST\.md$": [],
    "package\.json$": [_PHP_TEST_SUITE],
    "package\.xml$": [_PHP_TEST_SUITE],
    "PATENTS$": [],
    "PYTHON\-MANIFEST\.in$": [_PYTHON_TEST_SUITE],
    "README\.md$": [],
    "requirements\.txt$": [_PYTHON_TEST_SUITE],
    "setup\.cfg$": [_PYTHON_TEST_SUITE],
    "setup\.py$": [_PYTHON_TEST_SUITE],
}

# Regex that combines all keys in _ALLOWLIST_DICT
_ALL_TRIGGERS = "(" + ")|(".join(list(_ALLOWLIST_DICT.keys())) + ")"

# Add all triggers to their respective test suites
for trigger, test_suites in six.iteritems(_ALLOWLIST_DICT):
    for test_suite in test_suites:
        test_suite.add_trigger(trigger)
def _get_changed_files(base_branch):
    """
    Get list of changed files between current branch and base of target merge branch
    """
    # Get file changes between branch and merge-base of specified branch
    # Not combined to be Windows friendly
    merge_base = (
        subprocess.check_output(["git", "merge-base", base_branch, "HEAD"])
        .decode("UTF-8")
        .rstrip()
    )
    diff_output = subprocess.check_output(
        ["git", "diff", merge_base, "--name-only", "HEAD"]
    ).decode("UTF-8")
    return diff_output.splitlines()
def _can_skip_tests(file_names, triggers):
"""
Determines if tests are skippable based on if all files do not match list of regexes
:param file_names: list of changed files generated by _get_changed_files()
:param triggers: list of regexes matching file name that indicates tests should be run
:return: safe to skip tests
"""
for file_name in file_names:
if any(re.match(trigger, file_name) for trigger in triggers):
return False
return True
def _remove_irrelevant_tests(tests, skippable_labels):
"""
Filters out tests by config or language - will not remove sanitizer tests
:param tests: list of all tests generated by run_tests_matrix.py
:param skippable_labels: list of languages and platforms with skippable tests
:return: list of relevant tests
"""
# test.labels[0] is platform and test.labels[2] is language
# We skip a test if both are considered safe to skip
return [
test
for test in tests
if test.labels[0] not in skippable_labels
or test.labels[2] not in skippable_labels
]
def affects_c_cpp(base_branch):
    """
    Determines if a pull request's changes affect C/C++. This function exists because
    there are pull request tests that only test C/C++ code
    :param base_branch: branch that a pull request is requesting to merge into
    :return: boolean indicating whether C/C++ changes are made in pull request
    """
    changed_files = _get_changed_files(base_branch)
    # Any file outside the allowlist forces the assumption that C/C++ is
    # affected, just like the full-matrix fallback in filter_tests().
    if any(not re.match(_ALL_TRIGGERS, path) for path in changed_files):
        return True
    c_cpp_triggers = _CPP_TEST_SUITE.triggers + _CORE_TEST_SUITE.triggers
    return not _can_skip_tests(changed_files, c_cpp_triggers)
def filter_tests(tests, base_branch):
    """
    Filters out tests that are safe to ignore
    :param tests: list of all tests generated by run_tests_matrix.py
    :return: list of relevant tests
    """
    print(
        "Finding file differences between gRPC %s branch and pull request...\n"
        % base_branch
    )
    changed_files = _get_changed_files(base_branch)
    for changed_file in changed_files:
        print("  %s" % changed_file)
    print("")
    # If any changed file is outside the allowlist dictionary, run everything.
    if any(
        not re.match(_ALL_TRIGGERS, changed_file)
        for changed_file in changed_files
    ):
        return tests
    # Collect the labels of every suite whose triggers match no changed file.
    skippable_labels = []
    for test_suite in _ALL_TEST_SUITES:
        if _can_skip_tests(changed_files, test_suite.triggers):
            for label in test_suite.labels:
                print("  %s tests safe to skip" % label)
                skippable_labels.append(label)
    return _remove_irrelevant_tests(tests, skippable_labels)
| 8,040
| 34.422907
| 98
|
py
|
grpc
|
grpc-master/tools/run_tests/python_utils/download_and_unzip.py
|
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Download and unzip the target file to the destination."""
from __future__ import print_function
import os
import sys
import tempfile
import zipfile
import requests
def main():
    """Download a zip archive from a URL and extract it to a destination dir.

    Usage: download_and_unzip.py [zipfile-url] [destination]
    Exits with status 1 on bad usage or a failed download.
    """
    if len(sys.argv) != 3:
        print("Usage: python download_and_unzip.py [zipfile-url] [destination]")
        sys.exit(1)
    download_url = sys.argv[1]
    destination = sys.argv[2]
    with tempfile.TemporaryFile() as tmp_file:
        r = requests.get(download_url)
        if r.status_code != requests.codes.ok:
            # BUGFIX: Response.text is a property, not a method; calling it
            # raised a TypeError that masked the real download failure.
            print(
                'Download %s failed with [%d] "%s"'
                % (download_url, r.status_code, r.text)
            )
            sys.exit(1)
        else:
            tmp_file.write(r.content)
            # BUGFIX: the old comma-style print emitted a tuple instead of
            # interpolating the URL.
            print("Successfully downloaded from %s" % download_url)
            with zipfile.ZipFile(tmp_file, "r") as target_zip_file:
                target_zip_file.extractall(destination)
            print("Successfully unzip to %s" % destination)
| 1,585
| 30.098039
| 80
|
py
|
grpc
|
grpc-master/tools/run_tests/python_utils/check_on_pr.py
|
# Copyright 2018 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import datetime
import json
import os
import sys
import time
import traceback
import jwt
import requests
# Base URL for all GitHub REST API calls made by this module.
_GITHUB_API_PREFIX = "https://api.github.com"
# Repository the check runs and labels are posted to.
_GITHUB_REPO = "grpc/grpc"
# ID of the GitHub App used to authenticate (see _jwt_token).
_GITHUB_APP_ID = 22338
# Installation of that app on the repo, used to mint access tokens.
_INSTALLATION_ID = 519109
# Process-wide cache for the installation token; a dict with "token"/"exp"
# keys once populated by _access_token().
_ACCESS_TOKEN_CACHE = None
# Retry policy for fetching an access token from the GitHub API.
_ACCESS_TOKEN_FETCH_RETRIES = 6
_ACCESS_TOKEN_FETCH_RETRIES_INTERVAL_S = 15
# Maps a significance level (-1..3) to the PR label suffix used for it.
_CHANGE_LABELS = {
    -1: "improvement",
    0: "none",
    1: "low",
    2: "medium",
    3: "high",
}
# Maps a direction (-1/0/1) to the PR label suffix used for it.
_INCREASE_DECREASE = {
    -1: "decrease",
    0: "neutral",
    1: "increase",
}
def _jwt_token():
    """Build a short-lived JWT authenticating as the gRPC checks GitHub App.

    Reads the app's private key from the Kokoro keystore and returns a token
    signed with RS256 that expires in 10 minutes.
    """
    key_path = os.path.join(
        os.environ["KOKORO_KEYSTORE_DIR"], "73836_grpc_checks_private_key"
    )
    # Use a context manager so the key file handle is closed promptly
    # (the original `open(...).read()` leaked the file object).
    with open(key_path, "rb") as key_file:
        github_app_key = key_file.read()
    return jwt.encode(
        {
            "iat": int(time.time()),
            "exp": int(time.time() + 60 * 10),  # expire in 10 minutes
            "iss": _GITHUB_APP_ID,
        },
        github_app_key,
        algorithm="RS256",
    )
def _access_token():
    """Return a GitHub App installation access token, cached process-wide.

    Refreshes the cached token from the GitHub API when it is missing or
    expired, retrying up to _ACCESS_TOKEN_FETCH_RETRIES times; terminates the
    process if no token can be obtained.
    """
    global _ACCESS_TOKEN_CACHE
    # `is None` instead of `== None`: identity check is the correct idiom.
    if _ACCESS_TOKEN_CACHE is None or _ACCESS_TOKEN_CACHE["exp"] < time.time():
        for i in range(_ACCESS_TOKEN_FETCH_RETRIES):
            resp = requests.post(
                url="https://api.github.com/app/installations/%s/access_tokens"
                % _INSTALLATION_ID,
                headers={
                    "Authorization": "Bearer %s" % _jwt_token(),
                    "Accept": "application/vnd.github.machine-man-preview+json",
                },
            )
            try:
                _ACCESS_TOKEN_CACHE = {
                    "token": resp.json()["token"],
                    # Cache only for a minute to stay well inside the token's
                    # real lifetime.
                    "exp": time.time() + 60,
                }
                break
            except (KeyError, ValueError):
                traceback.print_exc()
                print("HTTP Status %d %s" % (resp.status_code, resp.reason))
                print("Fetch access token from Github API failed:")
                print(resp.text)
                if i != _ACCESS_TOKEN_FETCH_RETRIES - 1:
                    print(
                        "Retrying after %.2f second."
                        % _ACCESS_TOKEN_FETCH_RETRIES_INTERVAL_S
                    )
                    time.sleep(_ACCESS_TOKEN_FETCH_RETRIES_INTERVAL_S)
        else:
            print("error: Unable to fetch access token, exiting...")
            # NOTE(review): exits with status 0 even though this is an error
            # path; preserved because CI callers may rely on the job not
            # failing here — confirm before changing to a non-zero exit.
            sys.exit(0)
    return _ACCESS_TOKEN_CACHE["token"]
def _call(url, method="GET", json=None):
    """Issue an authenticated GitHub API request and return the response."""
    full_url = url if url.startswith("https://") else _GITHUB_API_PREFIX + url
    auth_headers = {
        "Authorization": "Bearer %s" % _access_token(),
        "Accept": "application/vnd.github.antiope-preview+json",
    }
    return requests.request(
        method=method, url=full_url, headers=auth_headers, json=json
    )
def _latest_commit():
    """Return the JSON payload of the newest commit on the current PR."""
    pr_number = os.environ["KOKORO_GITHUB_PULL_REQUEST_NUMBER"]
    commits = _call(
        "/repos/%s/pulls/%s/commits" % (_GITHUB_REPO, pr_number)
    ).json()
    return commits[-1]
def check_on_pr(name, summary, success=True):
    """Create/Update a check on current pull request.
    The check runs are aggregated by their name, so newer check will update the
    older check with the same name.
    Requires environment variable 'KOKORO_GITHUB_PULL_REQUEST_NUMBER' to indicate which pull request
    should be updated.
    Args:
      name: The name of the check.
      summary: A str in Markdown to be used as the detail information of the check.
      success: A bool indicates whether the check is succeed or not.
    """
    # All three Kokoro env vars are required (commit to annotate, keystore for
    # auth, PR number); outside a Kokoro PR job this is a logged no-op.
    if "KOKORO_GIT_COMMIT" not in os.environ:
        print("Missing KOKORO_GIT_COMMIT env var: not checking")
        return
    if "KOKORO_KEYSTORE_DIR" not in os.environ:
        print("Missing KOKORO_KEYSTORE_DIR env var: not checking")
        return
    if "KOKORO_GITHUB_PULL_REQUEST_NUMBER" not in os.environ:
        print("Missing KOKORO_GITHUB_PULL_REQUEST_NUMBER env var: not checking")
        return
    # Clip overlong summaries; presumably the API rejects bodies above ~64KB
    # (TODO confirm the exact GitHub limit).
    MAX_SUMMARY_LEN = 65400
    if len(summary) > MAX_SUMMARY_LEN:
        # Drop some hints to the log should someone come looking for what really happened!
        print("Clipping too long summary")
        print(summary)
        summary = summary[:MAX_SUMMARY_LEN] + "\n\n\n... CLIPPED (too long)"
    # ISO-8601 UTC timestamp with a trailing "Z" for the completed_at field.
    completion_time = (
        str(datetime.datetime.utcnow().replace(microsecond=0).isoformat()) + "Z"
    )
    # POST a completed check-run against the PR's head commit.
    resp = _call(
        "/repos/%s/check-runs" % _GITHUB_REPO,
        method="POST",
        json={
            "name": name,
            "head_sha": os.environ["KOKORO_GIT_COMMIT"],
            "status": "completed",
            "completed_at": completion_time,
            "conclusion": "success" if success else "failure",
            "output": {
                "title": name,
                "summary": summary,
            },
        },
    )
    print(
        "Result of Creating/Updating Check on PR:",
        json.dumps(resp.json(), indent=2),
    )
def label_significance_on_pr(name, change, labels=_CHANGE_LABELS):
    """Add a label to the PR indicating the significance of the check.

    Requires environment variable 'KOKORO_GITHUB_PULL_REQUEST_NUMBER' to indicate which pull request
    should be updated.

    Args:
      name: Prefix of the label; the final label is "<name>/<level>".
      change: Integer significance level; clamped into the keys of `labels`.
        (The original docstring documented a non-existent "value" parameter.)
      labels: Mapping from significance level to label suffix.
    """
    # Clamp `change` into the known range (min/max iterate dict keys directly;
    # the old list() copies were redundant).
    change = max(min(change, max(labels)), min(labels))
    value = labels[change]
    if "KOKORO_GIT_COMMIT" not in os.environ:
        print("Missing KOKORO_GIT_COMMIT env var: not checking")
        return
    if "KOKORO_KEYSTORE_DIR" not in os.environ:
        print("Missing KOKORO_KEYSTORE_DIR env var: not checking")
        return
    if "KOKORO_GITHUB_PULL_REQUEST_NUMBER" not in os.environ:
        print("Missing KOKORO_GITHUB_PULL_REQUEST_NUMBER env var: not checking")
        return
    issue_labels_url = "/repos/%s/issues/%s/labels" % (
        _GITHUB_REPO,
        os.environ["KOKORO_GITHUB_PULL_REQUEST_NUMBER"],
    )
    existing = _call(issue_labels_url, method="GET").json()
    print("Result of fetching labels on PR:", existing)
    # Keep every label that is not ours, then append the new "<name>/<value>".
    new = [x["name"] for x in existing if not x["name"].startswith(name + "/")]
    new.append(name + "/" + value)
    resp = _call(issue_labels_url, method="PUT", json=new)
    print("Result of setting labels on PR:", resp.text)
def label_increase_decrease_on_pr(name, change, significant):
    """Label the PR with decrease/increase/neutral based on `change` size."""
    # Order matters: the decrease check wins ties, matching the original.
    if change <= -significant:
        direction = -1
    elif change >= significant:
        direction = 1
    else:
        direction = 0
    label_significance_on_pr(name, direction, _INCREASE_DECREASE)
| 7,388
| 31.84
| 100
|
py
|
grpc
|
grpc-master/tools/run_tests/python_utils/report_utils.py
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate XML and HTML test reports."""
try:
from mako import exceptions
from mako.runtime import Context
from mako.template import Template
except ImportError:
pass # Mako not installed but it is ok.
import datetime
import os
import string
import xml.etree.cElementTree as ET
import six
def _filter_msg(msg, output_format):
"""Filters out nonprintable and illegal characters from the message."""
if output_format in ["XML", "HTML"]:
if isinstance(msg, bytes):
decoded_msg = msg.decode("UTF-8", "ignore")
else:
decoded_msg = msg
# keep whitespaces but remove formfeed and vertical tab characters
# that make XML report unparsable.
filtered_msg = "".join(
filter(
lambda x: x in string.printable and x != "\f" and x != "\v",
decoded_msg,
)
)
if output_format == "HTML":
filtered_msg = filtered_msg.replace('"', """)
return filtered_msg
else:
return msg
def new_junit_xml_tree():
    """Return an empty JUnit XML tree rooted at a <testsuites> element."""
    return ET.ElementTree(ET.Element("testsuites"))
def render_junit_xml_report(
    resultset,
    report_file,
    suite_package="grpc",
    suite_name="tests",
    replace_dots=True,
    multi_target=False,
):
    """Generate JUnit-like XML report."""
    if multi_target:
        # To have each test result displayed as a separate target by the
        # Resultstore/Sponge UI, we generate a separate XML report file for
        # each test result, placed in a per-test subdirectory.
        for shortname, results in six.iteritems(resultset):
            single_tree = new_junit_xml_tree()
            append_junit_xml_results(
                single_tree,
                {shortname: results},
                "%s_%s" % (suite_package, shortname),
                "%s_%s" % (suite_name, shortname),
                "1",
                replace_dots,
            )
            target_file = os.path.join(
                os.path.dirname(report_file),
                shortname,
                os.path.basename(report_file),
            )
            create_xml_report_file(single_tree, target_file)
    else:
        tree = new_junit_xml_tree()
        append_junit_xml_results(
            tree, resultset, suite_package, suite_name, "1", replace_dots
        )
        create_xml_report_file(tree, report_file)
def create_xml_report_file(tree, report_file):
"""Generate JUnit-like report file from xml tree ."""
# env variable can be used to override the base location for the reports
base_dir = os.getenv("GRPC_TEST_REPORT_BASE_DIR", None)
if base_dir:
report_file = os.path.join(base_dir, report_file)
# ensure the report directory exists
report_dir = os.path.dirname(os.path.abspath(report_file))
if not os.path.exists(report_dir):
os.makedirs(report_dir)
tree.write(report_file, encoding="UTF-8")
def append_junit_xml_results(
    tree, resultset, suite_package, suite_name, id, replace_dots=True
):
    """Append a JUnit-like XML report tree with test results as a new suite.

    Args:
      tree: ElementTree whose root the new <testsuite> element is added to.
      resultset: mapping of test shortname -> list of result objects, each
        with .state, .message and .elapsed_time attributes.
      suite_package: value for the suite's "package" attribute.
      suite_name: value for the suite's "name" attribute.
      id: value for the suite's "id" attribute (shadows the builtin, kept
        for interface compatibility).
      replace_dots: replace "." with "_" in the suite name (see below).
    """
    if replace_dots:
        # ResultStore UI displays test suite names containing dots only as the component
        # after the last dot, which results bad info being displayed in the UI.
        # We replace dots by another character to avoid this problem.
        suite_name = suite_name.replace(".", "_")
    testsuite = ET.SubElement(
        tree.getroot(),
        "testsuite",
        id=id,
        package=suite_package,
        name=suite_name,
        timestamp=datetime.datetime.now().isoformat(),
    )
    # Per-suite tallies, attached as attributes after all cases are emitted.
    failure_count = 0
    error_count = 0
    for shortname, results in six.iteritems(resultset):
        for result in results:
            xml_test = ET.SubElement(testsuite, "testcase", name=shortname)
            if result.elapsed_time:
                xml_test.set("time", str(result.elapsed_time))
            filtered_msg = _filter_msg(result.message, "XML")
            # Map jobset states onto JUnit child elements:
            # FAILED -> <failure>, TIMEOUT -> <error>, SKIPPED -> <skipped>.
            if result.state == "FAILED":
                ET.SubElement(
                    xml_test, "failure", message="Failure"
                ).text = filtered_msg
                failure_count += 1
            elif result.state == "TIMEOUT":
                ET.SubElement(
                    xml_test, "error", message="Timeout"
                ).text = filtered_msg
                error_count += 1
            elif result.state == "SKIPPED":
                ET.SubElement(xml_test, "skipped", message="Skipped")
    testsuite.set("failures", str(failure_count))
    testsuite.set("errors", str(error_count))
def render_interop_html_report(
    client_langs,
    server_langs,
    test_cases,
    auth_test_cases,
    http2_cases,
    http2_server_cases,
    resultset,
    num_failures,
    cloud_to_prod,
    prod_servers,
    http2_interop,
):
    """Generate HTML report for interop tests.

    Renders tools/run_tests/interop/interop_html_report.template with Mako
    into reports/index.html. Silently returns when Mako is not installed or
    the template file cannot be found.
    """
    template_file = "tools/run_tests/interop/interop_html_report.template"
    try:
        mytemplate = Template(filename=template_file, format_exceptions=True)
    except NameError:
        # Mako failed to import at module scope, so Template is undefined.
        print(
            "Mako template is not installed. Skipping HTML report generation."
        )
        return
    except IOError as e:
        print("Failed to find the template %s: %s" % (template_file, e))
        return
    args = {
        "client_langs": sorted(client_langs),
        "server_langs": sorted(server_langs),
        "test_cases": sorted(test_cases),
        "auth_test_cases": sorted(auth_test_cases),
        "http2_cases": sorted(http2_cases),
        "http2_server_cases": sorted(http2_server_cases),
        "resultset": resultset,
        "num_failures": num_failures,
        "cloud_to_prod": cloud_to_prod,
        "prod_servers": sorted(prod_servers),
        "http2_interop": http2_interop,
    }
    html_report_out_dir = "reports"
    if not os.path.exists(html_report_out_dir):
        os.mkdir(html_report_out_dir)
    html_file_path = os.path.join(html_report_out_dir, "index.html")
    try:
        with open(html_file_path, "w") as output_file:
            mytemplate.render_context(Context(output_file, **args))
    except Exception:
        # Narrowed from a bare `except:`; the Mako render error is printed
        # and the original exception is re-raised for the caller.
        print(exceptions.text_error_template().render())
        raise
def render_perf_profiling_results(output_filepath, profile_names):
    """Write a simple HTML <ul> linking to the given perf profile pages."""
    lines = ["<ul>\n"]
    lines.extend(
        "<li><a href=%s>%s</a></li>\n" % (name, name) for name in profile_names
    )
    lines.append("</ul>\n")
    with open(output_filepath, "w") as output_file:
        output_file.writelines(lines)
| 7,463
| 34.042254
| 95
|
py
|
grpc
|
grpc-master/tools/run_tests/python_utils/upload_test_results.py
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to upload Jenkins test results to BQ"""
from __future__ import print_function
import os
import sys
import time
import uuid
import six
gcp_utils_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../../gcp/utils")
)
sys.path.append(gcp_utils_dir)
import big_query_utils
# BigQuery dataset holding all Jenkins/Kokoro test result tables.
_DATASET_ID = "jenkins_test_results"
_DESCRIPTION = "Test results from master job run on Jenkins"
# 365 days in milliseconds
_EXPIRATION_MS = 365 * 24 * 60 * 60 * 1000
# Tables are day-partitioned so old partitions expire automatically.
_PARTITION_TYPE = "DAY"
_PROJECT_ID = "grpc-testing"
# Schema for per-test rows uploaded by upload_results_to_bq:
# tuples of (column name, BigQuery type, description).
_RESULTS_SCHEMA = [
    ("job_name", "STRING", "Name of Jenkins job"),
    ("build_id", "INTEGER", "Build ID of Jenkins job"),
    ("build_url", "STRING", "URL of Jenkins job"),
    ("test_name", "STRING", "Individual test name"),
    ("language", "STRING", "Language of test"),
    ("platform", "STRING", "Platform used for test"),
    ("config", "STRING", "Config used for test"),
    ("compiler", "STRING", "Compiler used for test"),
    ("iomgr_platform", "STRING", "Iomgr used for test"),
    ("result", "STRING", "Test result: PASSED, TIMEOUT, FAILED, or SKIPPED"),
    ("timestamp", "TIMESTAMP", "Timestamp of test run"),
    ("elapsed_time", "FLOAT", "How long test took to run"),
    ("cpu_estimated", "FLOAT", "Estimated CPU usage of test"),
    ("cpu_measured", "FLOAT", "Actual CPU usage of test"),
    ("return_code", "INTEGER", "Exit code of test"),
]
# Schema for interop rows uploaded by upload_interop_results_to_bq.
_INTEROP_RESULTS_SCHEMA = [
    ("job_name", "STRING", "Name of Jenkins/Kokoro job"),
    ("build_id", "INTEGER", "Build ID of Jenkins/Kokoro job"),
    ("build_url", "STRING", "URL of Jenkins/Kokoro job"),
    (
        "test_name",
        "STRING",
        "Unique test name combining client, server, and test_name",
    ),
    (
        "suite",
        "STRING",
        "Test suite: cloud_to_cloud, cloud_to_prod, or cloud_to_prod_auth",
    ),
    ("client", "STRING", "Client language"),
    ("server", "STRING", "Server host name"),
    ("test_case", "STRING", "Name of test case"),
    ("result", "STRING", "Test result: PASSED, TIMEOUT, FAILED, or SKIPPED"),
    ("timestamp", "TIMESTAMP", "Timestamp of test run"),
    ("elapsed_time", "FLOAT", "How long test took to run"),
]
def _get_build_metadata(test_results):
"""Add Kokoro build metadata to test_results based on environment
variables set by Kokoro.
"""
build_id = os.getenv("KOKORO_BUILD_NUMBER")
build_url = (
"https://source.cloud.google.com/results/invocations/%s"
% os.getenv("KOKORO_BUILD_ID")
)
job_name = os.getenv("KOKORO_JOB_NAME")
if build_id:
test_results["build_id"] = build_id
if build_url:
test_results["build_url"] = build_url
if job_name:
test_results["job_name"] = job_name
def _insert_rows_with_retries(bq, bq_table, bq_rows):
    """Insert rows to bq table. Retry on error.

    Args:
      bq: BigQuery client object, passed through to big_query_utils.
      bq_table: name of the destination table.
      bq_rows: list of row objects built by big_query_utils.make_row.

    Exits the process with status 1 if a batch fails all retry attempts.
    """
    # BigQuery sometimes fails with large uploads, so batch 1,000 rows at a time.
    for i in range((len(bq_rows) // 1000) + 1):
        max_retries = 3
        for attempt in range(max_retries):
            # insert_rows returns truthy on success; break to the next batch.
            if big_query_utils.insert_rows(
                bq,
                _PROJECT_ID,
                _DATASET_ID,
                bq_table,
                bq_rows[i * 1000 : (i + 1) * 1000],
            ):
                break
            else:
                if attempt < max_retries - 1:
                    print("Error uploading result to bigquery, will retry.")
                else:
                    # Last attempt for this batch failed; give up entirely.
                    print(
                        "Error uploading result to bigquery, all attempts"
                        " failed."
                    )
                    sys.exit(1)
def upload_results_to_bq(resultset, bq_table, extra_fields):
    """Upload test results to a BQ table.

    Args:
        resultset: dictionary generated by jobset.run
        bq_table: string name of table to create/upload results to in BQ
        extra_fields: dict with extra values that will be uploaded along with the results
    """
    bq = big_query_utils.create_big_query()
    big_query_utils.create_partitioned_table(
        bq,
        _PROJECT_ID,
        _DATASET_ID,
        bq_table,
        _RESULTS_SCHEMA,
        _DESCRIPTION,
        partition_type=_PARTITION_TYPE,
        expiration_ms=_EXPIRATION_MS,
    )

    def _row_for(shortname, result):
        # Flatten one jobset result plus build metadata into a BQ row.
        values = {}
        _get_build_metadata(values)
        values["cpu_estimated"] = result.cpu_estimated
        values["cpu_measured"] = result.cpu_measured
        values["elapsed_time"] = "%.2f" % result.elapsed_time
        values["result"] = result.state
        values["return_code"] = result.returncode
        values["test_name"] = shortname
        values["timestamp"] = time.strftime("%Y-%m-%d %H:%M:%S")
        values.update(extra_fields)
        return big_query_utils.make_row(str(uuid.uuid4()), values)

    bq_rows = [
        _row_for(shortname, result)
        for shortname, results in six.iteritems(resultset)
        for result in results
    ]
    _insert_rows_with_retries(bq, bq_table, bq_rows)
def upload_interop_results_to_bq(resultset, bq_table):
    """Upload interop test results to a BQ table.

    Args:
        resultset: dictionary generated by jobset.run
        bq_table: string name of table to create/upload results to in BQ
    """
    bq = big_query_utils.create_big_query()
    big_query_utils.create_partitioned_table(
        bq,
        _PROJECT_ID,
        _DATASET_ID,
        bq_table,
        _INTEROP_RESULTS_SCHEMA,
        _DESCRIPTION,
        partition_type=_PARTITION_TYPE,
        expiration_ms=_EXPIRATION_MS,
    )
    bq_rows = []
    for shortname, results in six.iteritems(resultset):
        # shortname has the form "<suite>:<client>:<server>:<test_case>".
        parts = shortname.split(":")
        for result in results:
            values = {
                "elapsed_time": "%.2f" % result.elapsed_time,
                "result": result.state,
                "test_name": shortname,
                "suite": parts[0],
                "client": parts[1],
                "server": parts[2],
                "test_case": parts[3],
                "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
            }
            _get_build_metadata(values)
            bq_rows.append(
                big_query_utils.make_row(str(uuid.uuid4()), values)
            )
    _insert_rows_with_retries(bq, bq_table, bq_rows)
| 7,154
| 35.505102
| 89
|
py
|
grpc
|
grpc-master/tools/run_tests/python_utils/watch_dirs.py
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to watch a (set) of directories for modifications."""
import os
import time
from six import string_types
class DirWatcher(object):
    """Helper to watch a (set) of directories for modifications."""

    def __init__(self, paths):
        """Accept a single path or an iterable of paths to watch."""
        if isinstance(paths, string_types):
            paths = [paths]
        self._done = False
        self.paths = list(paths)
        self.lastrun = time.time()
        self._cache = self._calculate()

    def _calculate(self):
        """Walk over all subscribed paths, check most recent mtime.

        Returns the newest st_mtime across all watched files, or None when
        no watchable file exists. Dotfiles are skipped.
        """
        most_recent_change = None
        for path in self.paths:
            if not os.path.exists(path):
                continue
            if not os.path.isdir(path):
                continue
            for root, _, files in os.walk(path):
                for f in files:
                    if f and f[0] == ".":
                        continue
                    try:
                        st = os.stat(os.path.join(root, f))
                    except FileNotFoundError:
                        # File vanished between walk() and stat(); skip it.
                        # BUGFIX: the old check compared against
                        # os.errno.ENOENT, but os.errno does not exist on
                        # modern Python (removed in 3.7) and raised
                        # AttributeError instead of skipping.
                        continue
                    if most_recent_change is None:
                        most_recent_change = st.st_mtime
                    else:
                        most_recent_change = max(
                            most_recent_change, st.st_mtime
                        )
        return most_recent_change

    def most_recent_change(self):
        """Return the newest mtime, recomputing at most once per second."""
        if time.time() - self.lastrun > 1:
            self._cache = self._calculate()
            self.lastrun = time.time()
        return self._cache
| 2,220
| 33.703125
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/python_utils/jobset.py
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run a group of subprocesses and then finish."""
import errno
import logging
import multiprocessing
import os
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
# cpu cost measurement: when True, jobs are wrapped in `time -p` and their
# CPU usage is recorded on the JobResult (see Job.start/Job.state).
measure_cpu_costs = False
# Default cap on the number of concurrently running jobs.
_DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
# Maximum number of bytes of job's stdout that will be stored in the result.
# Only last N bytes of stdout will be kept if the actual output longer.
_MAX_RESULT_SIZE = 64 * 1024
# NOTE: If you change this, please make sure to test reviewing the
# github PR with http://reviewable.io, which is known to add UTF-8
# characters to the PR description, which leak into the environment here
# and cause failures.
def strip_non_ascii_chars(s):
    """Return `s` with every non-ASCII character removed."""
    ascii_chars = [ch for ch in s if ord(ch) < 128]
    return "".join(ascii_chars)
def sanitized_environment(env):
    """Return a copy of `env` with non-ASCII characters stripped from every
    key and value."""

    def _ascii_only(text):
        # Same filtering as strip_non_ascii_chars, inlined here.
        return "".join(ch for ch in text if ord(ch) < 128)

    return {
        _ascii_only(key): _ascii_only(value) for key, value in env.items()
    }
def platform_string():
    """Map platform.system() onto the short names used by the test runner."""
    system = platform.system()
    if system == "Windows" or system.startswith("MSYS_NT"):
        return "windows"
    if system == "Darwin":
        return "mac"
    if system == "Linux":
        return "linux"
    return "posix"
# setup a signal handler so that signal.pause registers 'something'
# when a child finishes
# not using futures and threading to avoid a dependency on subprocess32
if platform_string() == "windows":
    # Windows has no SIGCHLD/SIGALRM; nothing to install.
    pass
else:

    def alarm_handler(unused_signum, unused_frame):
        # Intentionally empty: the handler only exists so SIGALRM interrupts
        # blocking calls such as signal.pause().
        pass

    # A no-op SIGCHLD handler ensures child termination wakes up the process.
    signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
    signal.signal(signal.SIGALRM, alarm_handler)
# Sentinel objects for a job's lifecycle state (compared by identity).
_SUCCESS = object()
_FAILURE = object()
_RUNNING = object()
_KILLED = object()
# ANSI color table: name -> [SGR color code, boldness flag].
_COLORS = {
    "red": [31, 0],
    "green": [32, 0],
    "yellow": [33, 0],
    "lightgray": [37, 0],
    "gray": [30, 1],
    "purple": [35, 0],
    "cyan": [36, 0],
}
# Terminal control sequences used to redraw the status line in place.
_BEGINNING_OF_LINE = "\x1b[0G"
_CLEAR_LINE = "\x1b[2K"
# Color used for each message tag emitted by message().
_TAG_COLOR = {
    "FAILED": "red",
    "FLAKE": "purple",
    "TIMEOUT_FLAKE": "purple",
    "WARNING": "yellow",
    "TIMEOUT": "red",
    "PASSED": "green",
    "START": "gray",
    "WAITING": "yellow",
    "SUCCESS": "green",
    "IDLE": "gray",
    "SKIPPED": "cyan",
}
# Log format used by the non-tty fallback path in message().
_FORMAT = "%(asctime)-15s %(message)s"
logging.basicConfig(level=logging.INFO, format=_FORMAT)
def eintr_be_gone(fn):
    """Call `fn` repeatedly until it finishes without being interrupted by
    EINTR; any other IOError propagates."""
    while True:
        try:
            return fn()
        except IOError as io_err:
            if io_err.errno == errno.EINTR:
                continue
            raise
def message(tag, msg, explanatory_text=None, do_newline=False):
    """Print a tagged, colorized job status line.

    Args:
      tag: a key of _TAG_COLOR, e.g. "START", "PASSED", "FAILED".
      msg: short status text printed after the tag.
      explanatory_text: optional details (str or bytes) printed before the tag.
      do_newline: finish the line instead of leaving the cursor on it, so the
        next message can overwrite it in place.
    """
    # De-duplicate repeated status updates (memoized via function attributes,
    # initialized below the function body).
    if (
        message.old_tag == tag
        and message.old_msg == msg
        and not explanatory_text
    ):
        return
    message.old_tag = tag
    message.old_msg = msg
    if explanatory_text:
        if isinstance(explanatory_text, bytes):
            explanatory_text = explanatory_text.decode("utf8", errors="replace")
    # Retry the write until it completes without EINTR.
    while True:
        try:
            if platform_string() == "windows" or not sys.stdout.isatty():
                # No ANSI escapes available: fall back to plain logging.
                if explanatory_text:
                    logging.info(explanatory_text)
                logging.info("%s: %s", tag, msg)
            else:
                # Repaint the current terminal line with the colorized tag.
                sys.stdout.write(
                    "%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s"
                    % (
                        _BEGINNING_OF_LINE,
                        _CLEAR_LINE,
                        "\n%s" % explanatory_text
                        if explanatory_text is not None
                        else "",
                        _COLORS[_TAG_COLOR[tag]][1],
                        _COLORS[_TAG_COLOR[tag]][0],
                        tag,
                        msg,
                        "\n"
                        if do_newline or explanatory_text is not None
                        else "",
                    )
                )
                sys.stdout.flush()
            return
        except IOError as e:
            if e.errno != errno.EINTR:
                raise


# Initialize the de-duplication memo read at the top of message().
message.old_tag = ""
message.old_msg = ""
def which(filename):
    """Resolve `filename` against PATH; names containing '/' pass through.

    Raises:
      Exception: when no PATH entry contains the file.
    """
    if "/" in filename:
        return filename
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, filename)
        if os.path.exists(candidate):
            return candidate
    raise Exception("%s not found" % filename)
class JobSpec(object):
    """Specifies what to run for a job."""

    def __init__(
        self,
        cmdline,
        shortname=None,
        environ=None,
        cwd=None,
        shell=False,
        timeout_seconds=5 * 60,
        flake_retries=0,
        timeout_retries=0,
        kill_handler=None,
        cpu_cost=1.0,
        verbose_success=False,
        logfilename=None,
    ):
        """
        Arguments:
          cmdline: a list of arguments to pass as the command line
          environ: a dictionary of environment variables to set in the child process
          kill_handler: a handler that will be called whenever job.kill() is invoked
          cpu_cost: number of cores per second this job needs
          logfilename: use given file to store job's output, rather than using a temporary file
        """
        if environ is None:
            environ = {}
        self.cmdline = cmdline
        self.environ = environ
        # Display name defaults to the executable being run.
        self.shortname = cmdline[0] if shortname is None else shortname
        self.cwd = cwd
        self.shell = shell
        self.timeout_seconds = timeout_seconds
        self.flake_retries = flake_retries
        self.timeout_retries = timeout_retries
        self.kill_handler = kill_handler
        self.cpu_cost = cpu_cost
        self.verbose_success = verbose_success
        self.logfilename = logfilename
        # NOTE(review): this raises only when BOTH retry counts are non-zero;
        # the message below suggests `or` may have been intended — confirm
        # with callers before changing the condition.
        if (
            self.logfilename
            and self.flake_retries != 0
            and self.timeout_retries != 0
        ):
            # Forbidden to avoid overwriting the test log when retrying.
            raise Exception(
                "Cannot use custom logfile when retries are enabled"
            )

    def identity(self):
        """Key used for hashing/ordering: the command line plus environment."""
        return "%r %r" % (self.cmdline, self.environ)

    def __hash__(self):
        return hash(self.identity())

    def __cmp__(self, other):
        # Python 2 relic; ignored by Python 3, which uses __lt__/__hash__.
        return self.identity() == other.identity()

    def __lt__(self, other):
        return self.identity() < other.identity()

    def __repr__(self):
        return "JobSpec(shortname=%s, cmdline=%s)" % (
            self.shortname,
            self.cmdline,
        )

    def __str__(self):
        return "%s: %s %s" % (
            self.shortname,
            " ".join("%s=%s" % kv for kv in list(self.environ.items())),
            " ".join(self.cmdline),
        )
class JobResult(object):
    """Mutable record of a single job's outcome, filled in by Job.state()."""

    def __init__(self):
        # Final state ("PASSED", "FAILED", "TIMEOUT", ...) and exit code.
        self.state = "UNKNOWN"
        self.returncode = -1
        # Tail of the job's stdout (bounded by _MAX_RESULT_SIZE).
        self.message = ""
        # Timing and retry bookkeeping.
        self.elapsed_time = 0
        self.num_failures = 0
        self.retries = 0
        # Estimated vs. measured CPU cores consumed.
        self.cpu_estimated = 1
        self.cpu_measured = 1
def read_from_start(f):
    """Rewind file object `f` and return its entire contents."""
    f.seek(0)
    contents = f.read()
    return contents
class Job(object):
"""Manages one job."""
    def __init__(
        self, spec, newline_on_success, travis, add_env, quiet_success=False
    ):
        """Create the job and immediately start its subprocess.

        Args:
          spec: JobSpec describing the command to run.
          newline_on_success: finish the status line after a pass.
          travis: always emit full lines (CI-friendly output).
          add_env: extra environment variables merged over spec.environ.
          quiet_success: suppress START/PASSED messages for passing jobs.
        """
        self._spec = spec
        self._newline_on_success = newline_on_success
        self._travis = travis
        # Copy so later mutation by the caller cannot affect this job.
        self._add_env = add_env.copy()
        self._retries = 0
        self._timeout_retries = 0
        self._suppress_failure_message = False
        self._quiet_success = quiet_success
        if not self._quiet_success:
            message("START", spec.shortname, do_newline=self._travis)
        self.result = JobResult()
        self.start()
    def GetSpec(self):
        """Return the JobSpec this job was created from."""
        return self._spec
    def start(self):
        """Launch the job's subprocess, retrying transient spawn failures."""
        if self._spec.logfilename:
            # make sure the log directory exists
            logfile_dir = os.path.dirname(
                os.path.abspath(self._spec.logfilename)
            )
            if not os.path.exists(logfile_dir):
                os.makedirs(logfile_dir)
            self._logfile = open(self._spec.logfilename, "w+")
        else:
            # macOS: a series of quick os.unlink invocation might cause OS
            # error during the creation of temporary file. By using
            # NamedTemporaryFile, we defer the removal of file and directory.
            self._logfile = tempfile.NamedTemporaryFile()
        # Child environment: process env overlaid with the spec's variables
        # and the jobset-wide additions, then stripped of non-ASCII chars.
        env = dict(os.environ)
        env.update(self._spec.environ)
        env.update(self._add_env)
        env = sanitized_environment(env)
        self._start = time.time()
        cmdline = self._spec.cmdline
        # The Unix time command is finicky when used with MSBuild, so we don't use it
        # with jobs that run MSBuild.
        global measure_cpu_costs
        if measure_cpu_costs and not "vsprojects\\build" in cmdline[0]:
            cmdline = ["time", "-p"] + cmdline
        else:
            # NOTE(review): this disables CPU measurement globally for all
            # subsequent jobs once any MSBuild job is seen — confirm this
            # module-wide side effect is intended.
            measure_cpu_costs = False
        try_start = lambda: subprocess.Popen(
            args=cmdline,
            stderr=subprocess.STDOUT,
            stdout=self._logfile,
            cwd=self._spec.cwd,
            shell=self._spec.shell,
            env=env,
        )
        # Retry spawning with exponential backoff; the final attempt (in the
        # for-else) is allowed to raise.
        delay = 0.3
        for i in range(0, 4):
            try:
                self._process = try_start()
                break
            except OSError:
                message(
                    "WARNING",
                    "Failed to start %s, retrying in %f seconds"
                    % (self._spec.shortname, delay),
                )
                time.sleep(delay)
                delay *= 2
        else:
            self._process = try_start()
        self._state = _RUNNING
def state(self):
"""Poll current state of the job. Prints messages at completion."""
def stdout(self=self):
stdout = read_from_start(self._logfile)
self.result.message = stdout[-_MAX_RESULT_SIZE:]
return stdout
if self._state == _RUNNING and self._process.poll() is not None:
elapsed = time.time() - self._start
self.result.elapsed_time = elapsed
if self._process.returncode != 0:
if self._retries < self._spec.flake_retries:
message(
"FLAKE",
"%s [ret=%d, pid=%d]"
% (
self._spec.shortname,
self._process.returncode,
self._process.pid,
),
stdout(),
do_newline=True,
)
self._retries += 1
self.result.num_failures += 1
self.result.retries = self._timeout_retries + self._retries
# NOTE: job is restarted regardless of jobset's max_time setting
self.start()
else:
self._state = _FAILURE
if not self._suppress_failure_message:
message(
"FAILED",
"%s [ret=%d, pid=%d, time=%.1fsec]"
% (
self._spec.shortname,
self._process.returncode,
self._process.pid,
elapsed,
),
stdout(),
do_newline=True,
)
self.result.state = "FAILED"
self.result.num_failures += 1
self.result.returncode = self._process.returncode
else:
self._state = _SUCCESS
measurement = ""
if measure_cpu_costs:
m = re.search(
r"real\s+([0-9.]+)\nuser\s+([0-9.]+)\nsys\s+([0-9.]+)",
(stdout()).decode("utf8", errors="replace"),
)
real = float(m.group(1))
user = float(m.group(2))
sys = float(m.group(3))
if real > 0.5:
cores = (user + sys) / real
self.result.cpu_measured = float("%.01f" % cores)
self.result.cpu_estimated = float(
"%.01f" % self._spec.cpu_cost
)
measurement = "; cpu_cost=%.01f; estimated=%.01f" % (
self.result.cpu_measured,
self.result.cpu_estimated,
)
if not self._quiet_success:
message(
"PASSED",
"%s [time=%.1fsec, retries=%d:%d%s]"
% (
self._spec.shortname,
elapsed,
self._retries,
self._timeout_retries,
measurement,
),
stdout() if self._spec.verbose_success else None,
do_newline=self._newline_on_success or self._travis,
)
self.result.state = "PASSED"
elif (
self._state == _RUNNING
and self._spec.timeout_seconds is not None
and time.time() - self._start > self._spec.timeout_seconds
):
elapsed = time.time() - self._start
self.result.elapsed_time = elapsed
if self._timeout_retries < self._spec.timeout_retries:
message(
"TIMEOUT_FLAKE",
"%s [pid=%d]" % (self._spec.shortname, self._process.pid),
stdout(),
do_newline=True,
)
self._timeout_retries += 1
self.result.num_failures += 1
self.result.retries = self._timeout_retries + self._retries
if self._spec.kill_handler:
self._spec.kill_handler(self)
self._process.terminate()
# NOTE: job is restarted regardless of jobset's max_time setting
self.start()
else:
message(
"TIMEOUT",
"%s [pid=%d, time=%.1fsec]"
% (self._spec.shortname, self._process.pid, elapsed),
stdout(),
do_newline=True,
)
self.kill()
self.result.state = "TIMEOUT"
self.result.num_failures += 1
return self._state
def kill(self):
if self._state == _RUNNING:
self._state = _KILLED
if self._spec.kill_handler:
self._spec.kill_handler(self)
self._process.terminate()
def suppress_failure_message(self):
self._suppress_failure_message = True
class Jobset(object):
    """Manages one run of jobs."""
    def __init__(
        self,
        check_cancelled,
        maxjobs,
        maxjobs_cpu_agnostic,
        newline_on_success,
        travis,
        stop_on_failure,
        add_env,
        quiet_success,
        max_time,
    ):
        """Creates the jobset.

        Args:
          check_cancelled: zero-arg callable polled for external cancellation.
          maxjobs: maximum total cpu_cost of concurrently running jobs.
          maxjobs_cpu_agnostic: hard cap on the number of concurrent jobs,
            regardless of their cpu cost.
          newline_on_success / travis / quiet_success: output options passed
            through to each Job.
          stop_on_failure: cancel the whole run on the first failing job.
          add_env: extra environment variables for every job.
          max_time: wall-clock budget in seconds; jobs started after it is
            exceeded are SKIPPED (<= 0 disables the budget).
        """
        self._running = set()
        self._check_cancelled = check_cancelled
        self._cancelled = False
        self._failures = 0
        self._completed = 0
        self._maxjobs = maxjobs
        self._maxjobs_cpu_agnostic = maxjobs_cpu_agnostic
        self._newline_on_success = newline_on_success
        self._travis = travis
        self._stop_on_failure = stop_on_failure
        self._add_env = add_env
        self._quiet_success = quiet_success
        self._max_time = max_time
        self.resultset = {}
        self._remaining = None
        self._start_time = time.time()
    def set_remaining(self, remaining):
        """Records how many jobs are still queued (for ETA reporting)."""
        self._remaining = remaining
    def get_num_failures(self):
        """Returns the number of failed (or killed) jobs so far."""
        return self._failures
    def cpu_cost(self):
        """Returns the summed cpu_cost of all currently running jobs."""
        c = 0
        for job in self._running:
            c += job._spec.cpu_cost
        return c
    def start(self, spec):
        """Start a job. Return True on success, False on failure."""
        # Block until there is capacity (both cpu-cost budget and job-count
        # cap), reaping finished jobs while we wait.
        while True:
            if (
                self._max_time > 0
                and time.time() - self._start_time > self._max_time
            ):
                skipped_job_result = JobResult()
                skipped_job_result.state = "SKIPPED"
                message("SKIPPED", spec.shortname, do_newline=True)
                self.resultset[spec.shortname] = [skipped_job_result]
                return True
            if self.cancelled():
                return False
            current_cpu_cost = self.cpu_cost()
            if current_cpu_cost == 0:
                break
            if current_cpu_cost + spec.cpu_cost <= self._maxjobs:
                if len(self._running) < self._maxjobs_cpu_agnostic:
                    break
            self.reap(spec.shortname, spec.cpu_cost)
        if self.cancelled():
            return False
        job = Job(
            spec,
            self._newline_on_success,
            self._travis,
            self._add_env,
            self._quiet_success,
        )
        self._running.add(job)
        if job.GetSpec().shortname not in self.resultset:
            self.resultset[job.GetSpec().shortname] = []
        return True
    def reap(self, waiting_for=None, waiting_for_cost=None):
        """Collect the dead jobs."""
        while self._running:
            dead = set()
            for job in self._running:
                st = eintr_be_gone(lambda: job.state())
                if st == _RUNNING:
                    continue
                if st == _FAILURE or st == _KILLED:
                    self._failures += 1
                    if self._stop_on_failure:
                        self._cancelled = True
                        for job in self._running:
                            job.kill()
                dead.add(job)
                break
            for job in dead:
                self._completed += 1
                if not self._quiet_success or job.result.state != "PASSED":
                    self.resultset[job.GetSpec().shortname].append(job.result)
                self._running.remove(job)
            if dead:
                return
            if not self._travis and platform_string() != "windows":
                # Print a periodic status/ETA line while we wait.
                rstr = (
                    ""
                    if self._remaining is None
                    else "%d queued, " % self._remaining
                )
                if self._remaining is not None and self._completed > 0:
                    now = time.time()
                    sofar = now - self._start_time
                    remaining = (
                        sofar
                        / self._completed
                        * (self._remaining + len(self._running))
                    )
                    rstr = "ETA %.1f sec; %s" % (remaining, rstr)
                if waiting_for is not None:
                    wstr = " next: %s @ %.2f cpu" % (
                        waiting_for,
                        waiting_for_cost,
                    )
                else:
                    wstr = ""
                message(
                    "WAITING",
                    "%s%d jobs running, %d complete, %d failed (load %.2f)%s"
                    % (
                        rstr,
                        len(self._running),
                        self._completed,
                        self._failures,
                        self.cpu_cost(),
                        wstr,
                    ),
                )
            if platform_string() == "windows":
                time.sleep(0.1)
            else:
                # Sleep until a child-process signal arrives, but wake at
                # least every 10s (SIGALRM) so we re-poll job state.
                signal.alarm(10)
                signal.pause()
    def cancelled(self):
        """Poll for cancellation."""
        if self._cancelled:
            return True
        if not self._check_cancelled():
            return False
        for job in self._running:
            job.kill()
        self._cancelled = True
        return True
    def finish(self):
        """Reaps all remaining jobs; returns True iff the run fully passed."""
        while self._running:
            if self.cancelled():
                pass  # poll cancellation
            self.reap()
        if platform_string() != "windows":
            # Cancel any pending SIGALRM scheduled by reap().
            signal.alarm(0)
        return not self.cancelled() and self._failures == 0
def _never_cancelled():
return False
def tag_remaining(xs):
    """Yield (item, remaining_count) pairs for the items of `xs`.

    Items are buffered as they arrive. While more than 5000 are buffered,
    the oldest item is yielded with a remaining-count of None (unknown).
    Once the input is exhausted, the buffered tail is yielded with an exact
    countdown (the last item gets 0).
    """
    buffered = []
    for item in xs:
        buffered.append(item)
        while len(buffered) > 5000:
            yield (buffered.pop(0), None)
    countdown = len(buffered)
    for item in buffered:
        countdown -= 1
        yield (item, countdown)
def run(
    cmdlines,
    check_cancelled=_never_cancelled,
    maxjobs=None,
    maxjobs_cpu_agnostic=None,
    newline_on_success=False,
    travis=False,
    infinite_runs=False,
    stop_on_failure=False,
    add_env=None,
    skip_jobs=False,
    quiet_success=False,
    max_time=-1,
):
    """Runs a set of jobs and returns (num_failures, resultset).

    Args:
      cmdlines: iterable of JobSpec objects to run.
      check_cancelled: zero-arg callable polled for external cancellation.
      maxjobs: cpu-cost budget for concurrent jobs (None -> default).
      maxjobs_cpu_agnostic: hard cap on concurrent job count (None -> default).
      newline_on_success / travis / quiet_success: output options.
      infinite_runs: unused here; retained for interface compatibility.
      stop_on_failure: cancel the run on the first failing job.
      add_env: extra environment variables for every job (None -> empty).
      skip_jobs: don't run anything, mark all jobs SKIPPED.
      max_time: wall-clock budget in seconds (<= 0 disables it).

    Returns:
      Tuple of (number of failures, dict mapping shortname -> [JobResult]).
    """
    # Avoid the mutable-default-argument pitfall: never share a dict
    # between calls.
    if add_env is None:
        add_env = {}
    if skip_jobs:
        resultset = {}
        skipped_job_result = JobResult()
        skipped_job_result.state = "SKIPPED"
        for job in cmdlines:
            message("SKIPPED", job.shortname, do_newline=True)
            resultset[job.shortname] = [skipped_job_result]
        return 0, resultset
    js = Jobset(
        check_cancelled,
        maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
        maxjobs_cpu_agnostic
        if maxjobs_cpu_agnostic is not None
        else _DEFAULT_MAX_JOBS,
        newline_on_success,
        travis,
        stop_on_failure,
        add_env,
        quiet_success,
        max_time,
    )
    for cmdline, remaining in tag_remaining(cmdlines):
        if not js.start(cmdline):
            break
        if remaining is not None:
            js.set_remaining(remaining)
    js.finish()
    return js.get_num_failures(), js.resultset
| 22,631
| 31.517241
| 95
|
py
|
grpc
|
grpc-master/tools/run_tests/python_utils/bazel_report_helper.py
|
#!/usr/bin/env python3
# Copyright 2022 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helps with running bazel with extra settings to generate structured test reports in CI."""
import argparse
import os
import platform
import sys
import uuid
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../../.."))
os.chdir(_ROOT)
# How long to sleep before querying Resultstore API and uploading to bigquery
# (to let ResultStore finish writing results from the bazel invocation that has
# just finished).
_UPLOAD_RBE_RESULTS_DELAY_SECONDS = 60
def _platform_string():
"""Detect current platform"""
if platform.system() == "Windows":
return "windows"
elif platform.system()[:7] == "MSYS_NT":
return "windows"
elif platform.system() == "Darwin":
return "mac"
elif platform.system() == "Linux":
return "linux"
else:
return "posix"
def _append_to_kokoro_bazel_invocations(invocation_id: str) -> None:
"""Kokoro can display "Bazel" result link on kokoro jobs if told so."""
# to get "bazel" link for kokoro build, we need to upload
# the "bazel_invocation_ids" file with bazel invocation ID as artifact.
kokoro_artifacts_dir = os.getenv("KOKORO_ARTIFACTS_DIR")
if kokoro_artifacts_dir:
# append the bazel invocation UUID to the bazel_invocation_ids file.
with open(
os.path.join(kokoro_artifacts_dir, "bazel_invocation_ids"), "a"
) as f:
f.write(invocation_id + "\n")
print(
'Added invocation ID %s to kokoro "bazel_invocation_ids" artifact'
% invocation_id,
file=sys.stderr,
)
else:
print(
'Skipped adding invocation ID %s to kokoro "bazel_invocation_ids"'
" artifact" % invocation_id,
file=sys.stderr,
)
pass
def _generate_junit_report_string(
report_suite_name: str, invocation_id: str, success: bool
) -> None:
"""Generate sponge_log.xml formatted report, that will make the bazel invocation reachable as a target in resultstore UI / sponge."""
bazel_invocation_url = (
"https://source.cloud.google.com/results/invocations/%s" % invocation_id
)
package_name = report_suite_name
# set testcase name to invocation URL. That way, the link will be displayed in some form
# resultstore UI and sponge even in case the bazel invocation succeeds.
testcase_name = bazel_invocation_url
if success:
# unfortunately, neither resultstore UI nor sponge display the "system-err" output (or any other tags)
# on a passing test case. But at least we tried.
test_output_tag = (
"<system-err>PASSED. See invocation results here: %s</system-err>"
% bazel_invocation_url
)
else:
# The failure output will be displayes in both resultstore UI and sponge when clicking on the failing testcase.
test_output_tag = (
'<failure message="Failure">FAILED. See bazel invocation results'
" here: %s</failure>" % bazel_invocation_url
)
lines = [
"<testsuites>",
'<testsuite id="1" name="%s" package="%s">'
% (report_suite_name, package_name),
'<testcase name="%s">' % testcase_name,
test_output_tag,
"</testcase></testsuite>",
"</testsuites>",
]
return "\n".join(lines)
def _create_bazel_wrapper(
    report_path: str,
    report_suite_name: str,
    invocation_id: str,
    upload_results: bool,
) -> None:
    """Create a "bazel wrapper" script that will execute bazel with extra settings and postprocessing.

    Args:
      report_path: directory where the wrapper scripts and bazelrc are written.
      report_suite_name: suite name used inside the generated XML report.
      invocation_id: bazel invocation UUID baked into the generated bazelrc.
      upload_results: if True, the generated wrapper also uploads RBE results
        to BigQuery after the bazel invocation finishes.

    Writes:
      - <report_path>/bazel_wrapper (unix) and bazel_wrapper.bat (windows)
      - <report_path>/bazel_wrapper.bazelrc with the invocation id and
        workspace status command
      - a pre-planted "failing" sponge_log.xml which the wrapper replaces
        with a "success" report only if bazel exits cleanly
    """
    os.makedirs(report_path, exist_ok=True)
    bazel_wrapper_filename = os.path.join(report_path, "bazel_wrapper")
    bazel_wrapper_bat_filename = bazel_wrapper_filename + ".bat"
    bazel_rc_filename = os.path.join(report_path, "bazel_wrapper.bazelrc")
    # put xml reports in a separate directory if requested by GRPC_TEST_REPORT_BASE_DIR
    report_base_dir = os.getenv("GRPC_TEST_REPORT_BASE_DIR", None)
    xml_report_path = os.path.abspath(
        os.path.join(report_base_dir, report_path)
        if report_base_dir
        else report_path
    )
    os.makedirs(xml_report_path, exist_ok=True)
    failing_report_filename = os.path.join(xml_report_path, "sponge_log.xml")
    success_report_filename = os.path.join(
        xml_report_path, "success_log_to_rename.xml"
    )
    if _platform_string() == "windows":
        workspace_status_command = (
            "tools/remote_build/workspace_status_kokoro.bat"
        )
    else:
        workspace_status_command = (
            "tools/remote_build/workspace_status_kokoro.sh"
        )
    # generate RC file with the bazel flags we want to use apply.
    # Using an RC file solves problems with flag ordering in the wrapper.
    # (e.g. some flags need to come after the build/test command)
    with open(bazel_rc_filename, "w") as f:
        f.write('build --invocation_id="%s"\n' % invocation_id)
        f.write(
            'build --workspace_status_command="%s"\n' % workspace_status_command
        )
    # generate "failing" and "success" report
    # the "failing" is named as "sponge_log.xml", which is the name picked up by sponge/resultstore
    # so the failing report will be used by default (unless we later replace the report with
    # one that says "success"). That way if something goes wrong before bazel is run,
    # there will at least be a "failing" target that indicates that (we really don't want silent failures).
    with open(failing_report_filename, "w") as f:
        f.write(
            _generate_junit_report_string(
                report_suite_name, invocation_id, success=False
            )
        )
    with open(success_report_filename, "w") as f:
        f.write(
            _generate_junit_report_string(
                report_suite_name, invocation_id, success=True
            )
        )
    # generate the bazel wrapper for linux/macos
    with open(bazel_wrapper_filename, "w") as f:
        intro_lines = [
            "#!/bin/bash",
            "set -ex",
            "",
            'tools/bazel --bazelrc="%s" "$@" || FAILED=true'
            % bazel_rc_filename,
            "",
        ]
        if upload_results:
            upload_results_lines = [
                "sleep %s" % _UPLOAD_RBE_RESULTS_DELAY_SECONDS,
                "PYTHONHTTPSVERIFY=0 python3"
                " ./tools/run_tests/python_utils/upload_rbe_results.py"
                ' --invocation_id="%s"' % invocation_id,
                "",
            ]
        else:
            upload_results_lines = []
        outro_lines = [
            'if [ "$FAILED" != "" ]',
            "then",
            "  exit 1",
            "else",
            (
                "  # success: plant the pre-generated xml report that says"
                ' "success"'
            ),
            "  mv -f %s %s"
            % (success_report_filename, failing_report_filename),
            "fi",
        ]
        lines = [
            line + "\n"
            for line in intro_lines + upload_results_lines + outro_lines
        ]
        f.writelines(lines)
    os.chmod(bazel_wrapper_filename, 0o775)  # make the unix wrapper executable
    # generate bazel wrapper for windows
    with open(bazel_wrapper_bat_filename, "w") as f:
        intro_lines = [
            "@echo on",
            "",
            'bazel --bazelrc="%s" %%*' % bazel_rc_filename,
            "set BAZEL_EXITCODE=%errorlevel%",
            "",
        ]
        if upload_results:
            upload_results_lines = [
                "sleep %s" % _UPLOAD_RBE_RESULTS_DELAY_SECONDS,
                "python3 tools/run_tests/python_utils/upload_rbe_results.py"
                ' --invocation_id="%s" || exit /b 1' % invocation_id,
                "",
            ]
        else:
            upload_results_lines = []
        outro_lines = [
            "if %BAZEL_EXITCODE% == 0 (",
            (
                "  @rem success: plant the pre-generated xml report that says"
                ' "success"'
            ),
            "  mv -f %s %s"
            % (success_report_filename, failing_report_filename),
            ")",
            "exit /b %BAZEL_EXITCODE%",
        ]
        lines = [
            line + "\n"
            for line in intro_lines + upload_results_lines + outro_lines
        ]
        f.writelines(lines)
    print("Bazel invocation ID: %s" % invocation_id, file=sys.stderr)
    print(
        "Upload test results to BigQuery after bazel runs: %s" % upload_results,
        file=sys.stderr,
    )
    print(
        "Generated bazel wrapper: %s" % bazel_wrapper_filename, file=sys.stderr
    )
    print(
        "Generated bazel wrapper: %s" % bazel_wrapper_bat_filename,
        file=sys.stderr,
    )
if __name__ == "__main__":
# parse command line
argp = argparse.ArgumentParser(
description=(
"Generate bazel wrapper to help with bazel test reports in CI."
)
)
argp.add_argument(
"--report_path",
required=True,
type=str,
help=(
"Path under which the bazel wrapper and other files are going to be"
" generated"
),
)
argp.add_argument(
"--report_suite_name",
default="bazel_invocations",
type=str,
help="Test suite name to use in generated XML report",
)
args = argp.parse_args()
# generate new bazel invocation ID
invocation_id = str(uuid.uuid4())
report_path = args.report_path
report_suite_name = args.report_suite_name
upload_results = True if os.getenv("UPLOAD_TEST_RESULTS") else False
_append_to_kokoro_bazel_invocations(invocation_id)
_create_bazel_wrapper(
report_path, report_suite_name, invocation_id, upload_results
)
| 10,420
| 33.506623
| 137
|
py
|
grpc
|
grpc-master/tools/run_tests/python_utils/upload_rbe_results.py
|
#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Uploads RBE results to BigQuery"""
import argparse
import json
import os
import ssl
import sys
import urllib.error
import urllib.parse
import urllib.request
import uuid
gcp_utils_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../../gcp/utils")
)
sys.path.append(gcp_utils_dir)
import big_query_utils
_DATASET_ID = "jenkins_test_results"
_DESCRIPTION = "Test results from master RBE builds on Kokoro"
# 365 days in milliseconds
_EXPIRATION_MS = 365 * 24 * 60 * 60 * 1000
_PARTITION_TYPE = "DAY"
_PROJECT_ID = "grpc-testing"
_RESULTS_SCHEMA = [
("job_name", "STRING", "Name of Kokoro job"),
("build_id", "INTEGER", "Build ID of Kokoro job"),
("build_url", "STRING", "URL of Kokoro build"),
("test_target", "STRING", "Bazel target path"),
("test_class_name", "STRING", "Name of test class"),
("test_case", "STRING", "Name of test case"),
("result", "STRING", "Test or build result"),
("timestamp", "TIMESTAMP", "Timestamp of test run"),
("duration", "FLOAT", "Duration of the test run"),
]
_TABLE_ID = "rbe_test_results"
def _get_api_key():
"""Returns string with API key to access ResultStore.
Intended to be used in Kokoro environment."""
api_key_directory = os.getenv("KOKORO_GFILE_DIR")
api_key_file = os.path.join(api_key_directory, "resultstore_api_key")
assert os.path.isfile(api_key_file), (
"Must add --api_key arg if not on "
"Kokoro or Kokoro environment is not set up properly."
)
with open(api_key_file, "r") as f:
return f.read().replace("\n", "")
def _get_invocation_id():
"""Returns String of Bazel invocation ID. Intended to be used in
Kokoro environment."""
bazel_id_directory = os.getenv("KOKORO_ARTIFACTS_DIR")
bazel_id_file = os.path.join(bazel_id_directory, "bazel_invocation_ids")
assert os.path.isfile(bazel_id_file), (
"bazel_invocation_ids file, written "
"by RBE initialization script, expected but not found."
)
with open(bazel_id_file, "r") as f:
return f.read().replace("\n", "")
def _parse_test_duration(duration_str):
"""Parse test duration string in '123.567s' format"""
try:
if duration_str.endswith("s"):
duration_str = duration_str[:-1]
return float(duration_str)
except:
return None
def _upload_results_to_bq(rows):
    """Upload test results to a BQ table.

    Args:
      rows: A list of dictionaries containing data for each row to insert

    Creates (or reuses) the day-partitioned destination table, then inserts
    the rows with up to 3 attempts. Exits the whole process with status 1
    if all attempts fail.
    """
    bq = big_query_utils.create_big_query()
    big_query_utils.create_partitioned_table(
        bq,
        _PROJECT_ID,
        _DATASET_ID,
        _TABLE_ID,
        _RESULTS_SCHEMA,
        _DESCRIPTION,
        partition_type=_PARTITION_TYPE,
        expiration_ms=_EXPIRATION_MS,
    )
    max_retries = 3
    for attempt in range(max_retries):
        # insert_rows returns truthy on success; retry otherwise.
        if big_query_utils.insert_rows(
            bq, _PROJECT_ID, _DATASET_ID, _TABLE_ID, rows
        ):
            break
        else:
            if attempt < max_retries - 1:
                print("Error uploading result to bigquery, will retry.")
            else:
                print(
                    "Error uploading result to bigquery, all attempts failed."
                )
                sys.exit(1)
def _get_resultstore_data(api_key, invocation_id):
    """Returns dictionary of test results by querying ResultStore API.
    Args:
        api_key: String of ResultStore API key
        invocation_id: String of ResultStore invocation ID to results from

    Returns:
      A list of ResultStore "action" dicts, accumulated across all pages.
    """
    all_actions = []
    page_token = ""
    # ResultStore's API returns data on a limited number of tests. When we exceed
    # that limit, the 'nextPageToken' field is included in the request to get
    # subsequent data, so keep requesting until 'nextPageToken' field is omitted.
    while True:
        req = urllib.request.Request(
            url="https://resultstore.googleapis.com/v2/invocations/%s/targets/-/configuredTargets/-/actions?key=%s&pageToken=%s&fields=next_page_token,actions.id,actions.status_attributes,actions.timing,actions.test_action"
            % (invocation_id, api_key, page_token),
            headers={"Content-Type": "application/json"},
        )
        ctx_dict = {}
        if os.getenv("PYTHONHTTPSVERIFY") == "0":
            # Explicitly requested by the environment: disable TLS certificate
            # verification for this request.
            ctx = ssl.create_default_context()
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
            ctx_dict = {"context": ctx}
        raw_resp = urllib.request.urlopen(req, **ctx_dict).read()
        # urlopen may hand back bytes; normalize to str before json parsing.
        decoded_resp = (
            raw_resp
            if isinstance(raw_resp, str)
            else raw_resp.decode("utf-8", "ignore")
        )
        results = json.loads(decoded_resp)
        all_actions.extend(results["actions"])
        if "nextPageToken" not in results:
            break
        page_token = results["nextPageToken"]
    return all_actions
if __name__ == "__main__":
# Arguments are necessary if running in a non-Kokoro environment.
argp = argparse.ArgumentParser(
description=(
"Fetches results for given RBE invocation and uploads them to"
" BigQuery table."
)
)
argp.add_argument(
"--api_key",
default="",
type=str,
help="The API key to read from ResultStore API",
)
argp.add_argument(
"--invocation_id",
default="",
type=str,
help="UUID of bazel invocation to fetch.",
)
argp.add_argument(
"--bq_dump_file",
default=None,
type=str,
help="Dump JSON data to file just before uploading",
)
argp.add_argument(
"--resultstore_dump_file",
default=None,
type=str,
help="Dump JSON data as received from ResultStore API",
)
argp.add_argument(
"--skip_upload",
default=False,
action="store_const",
const=True,
help="Skip uploading to bigquery",
)
args = argp.parse_args()
api_key = args.api_key or _get_api_key()
invocation_id = args.invocation_id or _get_invocation_id()
resultstore_actions = _get_resultstore_data(api_key, invocation_id)
if args.resultstore_dump_file:
with open(args.resultstore_dump_file, "w") as f:
json.dump(resultstore_actions, f, indent=4, sort_keys=True)
print(
("Dumped resultstore data to file %s" % args.resultstore_dump_file)
)
# google.devtools.resultstore.v2.Action schema:
# https://github.com/googleapis/googleapis/blob/master/google/devtools/resultstore/v2/action.proto
bq_rows = []
for index, action in enumerate(resultstore_actions):
# Filter out non-test related data, such as build results.
if "testAction" not in action:
continue
# Some test results contain the fileProcessingErrors field, which indicates
# an issue with parsing results individual test cases.
if "fileProcessingErrors" in action:
test_cases = [
{
"testCase": {
"caseName": str(action["id"]["actionId"]),
}
}
]
# Test timeouts have a different dictionary structure compared to pass and
# fail results.
elif action["statusAttributes"]["status"] == "TIMED_OUT":
test_cases = [
{
"testCase": {
"caseName": str(action["id"]["actionId"]),
"timedOut": True,
}
}
]
# When RBE believes its infrastructure is failing, it will abort and
# mark running tests as UNKNOWN. These infrastructure failures may be
# related to our tests, so we should investigate if specific tests are
# repeatedly being marked as UNKNOWN.
elif action["statusAttributes"]["status"] == "UNKNOWN":
test_cases = [
{
"testCase": {
"caseName": str(action["id"]["actionId"]),
"unknown": True,
}
}
]
# Take the timestamp from the previous action, which should be
# a close approximation.
action["timing"] = {
"startTime": resultstore_actions[index - 1]["timing"][
"startTime"
]
}
elif "testSuite" not in action["testAction"]:
continue
elif "tests" not in action["testAction"]["testSuite"]:
continue
else:
test_cases = []
for tests_item in action["testAction"]["testSuite"]["tests"]:
test_cases += tests_item["testSuite"]["tests"]
for test_case in test_cases:
if any(s in test_case["testCase"] for s in ["errors", "failures"]):
result = "FAILED"
elif "timedOut" in test_case["testCase"]:
result = "TIMEOUT"
elif "unknown" in test_case["testCase"]:
result = "UNKNOWN"
else:
result = "PASSED"
try:
bq_rows.append(
{
"insertId": str(uuid.uuid4()),
"json": {
"job_name": os.getenv("KOKORO_JOB_NAME"),
"build_id": os.getenv("KOKORO_BUILD_NUMBER"),
"build_url": "https://source.cloud.google.com/results/invocations/%s"
% invocation_id,
"test_target": action["id"]["targetId"],
"test_class_name": test_case["testCase"].get(
"className", ""
),
"test_case": test_case["testCase"]["caseName"],
"result": result,
"timestamp": action["timing"]["startTime"],
"duration": _parse_test_duration(
action["timing"]["duration"]
),
},
}
)
except Exception as e:
print(("Failed to parse test result. Error: %s" % str(e)))
print((json.dumps(test_case, indent=4)))
bq_rows.append(
{
"insertId": str(uuid.uuid4()),
"json": {
"job_name": os.getenv("KOKORO_JOB_NAME"),
"build_id": os.getenv("KOKORO_BUILD_NUMBER"),
"build_url": "https://source.cloud.google.com/results/invocations/%s"
% invocation_id,
"test_target": action["id"]["targetId"],
"test_class_name": "N/A",
"test_case": "N/A",
"result": "UNPARSEABLE",
"timestamp": "N/A",
},
}
)
if args.bq_dump_file:
with open(args.bq_dump_file, "w") as f:
json.dump(bq_rows, f, indent=4, sort_keys=True)
print(("Dumped BQ data to file %s" % args.bq_dump_file))
if not args.skip_upload:
# BigQuery sometimes fails with large uploads, so batch 1,000 rows at a time.
MAX_ROWS = 1000
for i in range(0, len(bq_rows), MAX_ROWS):
_upload_results_to_bq(bq_rows[i : i + MAX_ROWS])
else:
print("Skipped upload to bigquery.")
| 12,325
| 36.238671
| 223
|
py
|
grpc
|
grpc-master/tools/run_tests/python_utils/dockerjob.py
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers to run docker instances as jobs."""
from __future__ import print_function
import json
import os
import subprocess
import sys
import tempfile
import time
import uuid
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import jobset
_DEVNULL = open(os.devnull, "w")
def random_name(base_name):
    """Return `base_name` with a random UUID suffix appended."""
    return "{}_{}".format(base_name, uuid.uuid4())
def docker_kill(cid):
    """Kills a docker container. Returns True if successful."""
    exit_code = subprocess.call(
        ["docker", "kill", str(cid)],
        stdin=subprocess.PIPE,
        stdout=_DEVNULL,
        stderr=subprocess.STDOUT,
    )
    return exit_code == 0
def docker_mapped_port(cid, port, timeout_seconds=15):
    """Return the host port that container `cid` maps to internal `port`.

    Polls `docker port` for up to `timeout_seconds`; raises if the mapping
    never becomes available.
    """
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        try:
            port_info = subprocess.check_output(
                "docker port %s %s" % (cid, port), stderr=_DEVNULL, shell=True
            ).decode()
            return int(port_info.split(":", 2)[1])
        except subprocess.CalledProcessError:
            pass
    raise Exception(
        "Failed to get exposed port %s for container %s." % (port, cid)
    )
def docker_ip_address(cid, timeout_seconds=15):
    """Get the IP address of the given container via `docker inspect`.

    Polls for up to timeout_seconds; raises if the address never becomes
    available. (The original docstring was copy-pasted from
    docker_mapped_port and described port mapping instead.)
    """
    started = time.time()
    while time.time() - started < timeout_seconds:
        cmd = "docker inspect %s" % cid
        try:
            output = subprocess.check_output(
                cmd, stderr=_DEVNULL, shell=True
            ).decode()
            json_info = json.loads(output)
            assert len(json_info) == 1
            out = json_info[0]["NetworkSettings"]["IPAddress"]
            if not out:
                # Address not assigned yet; retry until the timeout.
                # NOTE(review): this loop has no sleep, so it busy-polls.
                continue
            return out
        except subprocess.CalledProcessError as e:
            pass
    raise Exception(
        "Non-retryable error: Failed to get ip address of container %s." % cid
    )
def wait_for_healthy(cid, shortname, timeout_seconds):
    """Wait timeout_seconds for the container to become healthy"""
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        try:
            health = subprocess.check_output(
                [
                    "docker",
                    "inspect",
                    '--format="{{.State.Health.Status}}"',
                    cid,
                ],
                stderr=_DEVNULL,
            ).decode()
            if health.strip("\n") == "healthy":
                return
        except subprocess.CalledProcessError:
            pass
        time.sleep(1)
    raise Exception(
        "Timed out waiting for %s (%s) to pass health check" % (shortname, cid)
    )
def finish_jobs(jobs, suppress_failure=True):
    """Kill the given docker jobs, then block until all of them have finished."""
    for docker_job in jobs:
        docker_job.kill(suppress_failure=suppress_failure)
    while any(docker_job.is_running() for docker_job in jobs):
        time.sleep(1)
def image_exists(image):
    """Returns True if given docker image exists."""
    inspect_status = subprocess.call(
        ["docker", "inspect", image],
        stdin=subprocess.PIPE,
        stdout=_DEVNULL,
        stderr=subprocess.STDOUT,
    )
    return inspect_status == 0
def remove_image(image, skip_nonexistent=False, max_retries=10):
    """Attempts to remove docker image with retries.

    Returns True on success (or when the image is absent and
    skip_nonexistent is set), False after all retries fail.
    """
    if skip_nonexistent and not image_exists(image):
        return True
    for _ in range(max_retries):
        removed = (
            subprocess.call(
                ["docker", "rmi", "-f", image],
                stdin=subprocess.PIPE,
                stdout=_DEVNULL,
                stderr=subprocess.STDOUT,
            )
            == 0
        )
        if removed:
            return True
        time.sleep(2)
    print("Failed to remove docker image %s" % image)
    return False
class DockerJob:
    """Encapsulates a job"""
    def __init__(self, spec):
        # spec must carry a `container_name` attribute in addition to the
        # usual JobSpec fields; the underlying jobset.Job starts immediately.
        self._spec = spec
        self._job = jobset.Job(
            spec, newline_on_success=True, travis=True, add_env={}
        )
        self._container_name = spec.container_name
    def mapped_port(self, port):
        """Returns the host port mapped to `port` inside the container."""
        return docker_mapped_port(self._container_name, port)
    def ip_address(self):
        """Returns the container's IP address."""
        return docker_ip_address(self._container_name)
    def wait_for_healthy(self, timeout_seconds):
        """Blocks until the container reports healthy or times out."""
        wait_for_healthy(
            self._container_name, self._spec.shortname, timeout_seconds
        )
    def kill(self, suppress_failure=False):
        """Sends kill signal to the container."""
        if suppress_failure:
            self._job.suppress_failure_message()
        return docker_kill(self._container_name)
    def is_running(self):
        """Polls a job and returns True if given job is still running."""
        return self._job.state() == jobset._RUNNING
| 5,545
| 29.141304
| 82
|
py
|
grpc
|
grpc-master/tools/run_tests/python_utils/__init__.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 577
| 40.285714
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/python_utils/antagonist.py
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is used by run_tests.py to create cpu load on a machine"""
# Busy-spin forever to keep a CPU core loaded; the process is terminated
# externally by the test harness when the load is no longer needed.
while True:
    pass
| 689
| 35.315789
| 74
|
py
|
grpc
|
grpc-master/tools/run_tests/artifacts/artifact_targets.py
|
#!/usr/bin/env python3
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of targets to build artifacts."""
import os.path
import random
import string
import sys
sys.path.insert(0, os.path.abspath(".."))
import python_utils.jobset as jobset
_LATEST_MANYLINUX = "manylinux2014"
def create_docker_jobspec(
    name,
    dockerfile_dir,
    shell_command,
    environ=None,
    flake_retries=0,
    timeout_retries=0,
    timeout_seconds=30 * 60,
    extra_docker_args=None,
    verbose_success=False,
):
    """Creates jobspec for a task running under docker.

    Args:
      name: artifact name; used for the job shortname and output directory.
      dockerfile_dir: directory containing the Dockerfile for the build image.
      shell_command: command run inside the container.
      environ: optional dict of extra environment variables for the build.
      flake_retries: retries allowed on flaky failures.
      timeout_retries: retries allowed on timeouts.
      timeout_seconds: per-attempt job timeout.
      extra_docker_args: extra arguments passed to "docker run".
      verbose_success: log job output even when it succeeds.

    Returns:
      A jobset.JobSpec that runs the dockerized artifact build.
    """
    # Use a None sentinel instead of a mutable default dict ({} as a default
    # is a shared object across calls); copy so the caller's dict is untouched.
    environ = {} if environ is None else environ.copy()
    environ["ARTIFACTS_OUT"] = "artifacts/%s" % name
    docker_args = []
    for k, v in list(environ.items()):
        docker_args += ["-e", "%s=%s" % (k, v)]
    docker_env = {
        "DOCKERFILE_DIR": dockerfile_dir,
        "DOCKER_RUN_SCRIPT": "tools/run_tests/dockerize/docker_run.sh",
        "DOCKER_RUN_SCRIPT_COMMAND": shell_command,
        "OUTPUT_DIR": "artifacts",
    }
    if extra_docker_args is not None:
        docker_env["EXTRA_DOCKER_ARGS"] = extra_docker_args
    jobspec = jobset.JobSpec(
        cmdline=["tools/run_tests/dockerize/build_and_run_docker.sh"]
        + docker_args,
        environ=docker_env,
        shortname="build_artifact.%s" % (name),
        timeout_seconds=timeout_seconds,
        flake_retries=flake_retries,
        timeout_retries=timeout_retries,
        verbose_success=verbose_success,
    )
    return jobspec
def create_jobspec(
    name,
    cmdline,
    environ=None,
    shell=False,
    flake_retries=0,
    timeout_retries=0,
    timeout_seconds=30 * 60,
    use_workspace=False,
    cpu_cost=1.0,
    verbose_success=False,
):
    """Creates jobspec for an artifact build running directly on the host.

    Args:
      name: artifact name; used for the job shortname and output directory.
      cmdline: command (as a list) to execute.
      environ: optional dict of extra environment variables.
      shell: run the command through a shell.
      flake_retries: retries allowed on flaky failures.
      timeout_retries: retries allowed on timeouts.
      timeout_seconds: per-attempt job timeout.
      use_workspace: run in a freshly created git workspace instead of
        the repository root.
      cpu_cost: scheduling weight for the job runner.
      verbose_success: log job output even when it succeeds.

    Returns:
      A jobset.JobSpec for the build.
    """
    # None sentinel avoids the mutable-default-argument pitfall; copy so the
    # caller's dict is never mutated.
    environ = {} if environ is None else environ.copy()
    if use_workspace:
        environ["WORKSPACE_NAME"] = "workspace_%s" % name
        environ["ARTIFACTS_OUT"] = os.path.join("..", "artifacts", name)
        cmdline = [
            "bash",
            "tools/run_tests/artifacts/run_in_workspace.sh",
        ] + cmdline
    else:
        environ["ARTIFACTS_OUT"] = os.path.join("artifacts", name)
    jobspec = jobset.JobSpec(
        cmdline=cmdline,
        environ=environ,
        shortname="build_artifact.%s" % (name),
        timeout_seconds=timeout_seconds,
        flake_retries=flake_retries,
        timeout_retries=timeout_retries,
        shell=shell,
        cpu_cost=cpu_cost,
        verbose_success=verbose_success,
    )
    return jobspec
# Minimum macOS version targeted by macOS artifact builds.
_MACOS_COMPAT_FLAG = "-mmacosx-version-min=10.10"
# Compiler flag selecting a 32-bit ("x86") or 64-bit ("x64") build.
_ARCH_FLAG_MAP = {"x86": "-m32", "x64": "-m64"}
class PythonArtifact:
    """Builds Python artifacts."""

    def __init__(self, platform, arch, py_version, presubmit=False):
        # e.g. "python_manylinux2014_x64_cp39-cp39"
        self.name = "python_%s_%s_%s" % (platform, arch, py_version)
        self.platform = platform
        self.arch = arch
        self.labels = ["artifact", "python", platform, arch, py_version]
        if presubmit:
            self.labels.append("presubmit")
        self.py_version = py_version
        # Only the newest manylinux flavor gets the extra label.
        if platform == _LATEST_MANYLINUX:
            self.labels.append("latest-manylinux")
        # All linux-flavored platforms also carry a generic "linux" label.
        if "manylinux" in platform:
            self.labels.append("linux")
        if "linux_extra" in platform:
            # linux_extra wheels used to be built by a separate kokoro job.
            # Their build is now much faster, so they can be included
            # in the regular artifact build.
            self.labels.append("linux")
        if "musllinux" in platform:
            self.labels.append("linux")

    def pre_build_jobspecs(self):
        # No preparatory jobs are needed before building Python artifacts.
        return []

    def build_jobspec(self, inner_jobs=None):
        """Returns the jobspec that builds this artifact (platform-dependent)."""
        environ = {}
        if inner_jobs is not None:
            # set number of parallel jobs when building native extension
            # building the native extension is the most time-consuming part of the build
            environ["GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS"] = str(inner_jobs)
        if self.platform == "macos":
            # Build both arm64 and x86_64 into a single (universal2) binary.
            environ["ARCHFLAGS"] = "-arch arm64 -arch x86_64"
            environ["GRPC_UNIVERSAL2_REPAIR"] = "true"
            environ["GRPC_BUILD_WITH_BORING_SSL_ASM"] = "false"
        if self.platform == "linux_extra":
            # Crosscompilation build for armv7 (e.g. Raspberry Pi)
            environ["PYTHON"] = "/opt/python/{}/bin/python3".format(
                self.py_version
            )
            environ["PIP"] = "/opt/python/{}/bin/pip3".format(self.py_version)
            environ["GRPC_SKIP_PIP_CYTHON_UPGRADE"] = "TRUE"
            environ["GRPC_SKIP_TWINE_CHECK"] = "TRUE"
            return create_docker_jobspec(
                self.name,
                "tools/dockerfile/grpc_artifact_python_linux_{}".format(
                    self.arch
                ),
                "tools/run_tests/artifacts/build_artifact_python.sh",
                environ=environ,
                timeout_seconds=60 * 60,
            )
        elif "manylinux" in self.platform:
            if self.arch == "x86":
                environ["SETARCH_CMD"] = "linux32"
            # Inside the manylinux container, the python installations are located in
            # special places...
            environ["PYTHON"] = "/opt/python/{}/bin/python".format(
                self.py_version
            )
            environ["PIP"] = "/opt/python/{}/bin/pip".format(self.py_version)
            environ["GRPC_SKIP_PIP_CYTHON_UPGRADE"] = "TRUE"
            if self.arch == "aarch64":
                environ["GRPC_SKIP_TWINE_CHECK"] = "TRUE"
                # As we won't strip the binary with auditwheel (see below), strip
                # it at link time.
                environ["LDFLAGS"] = "-s"
            else:
                # only run auditwheel if we're not crosscompiling
                environ["GRPC_RUN_AUDITWHEEL_REPAIR"] = "TRUE"
                # only build the packages that depend on grpcio-tools
                # if we're not crosscompiling.
                # - they require protoc to run on current architecture
                # - they only have sdist packages anyway, so it's useless to build them again
                environ["GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS"] = "TRUE"
            return create_docker_jobspec(
                self.name,
                "tools/dockerfile/grpc_artifact_python_%s_%s"
                % (self.platform, self.arch),
                "tools/run_tests/artifacts/build_artifact_python.sh",
                environ=environ,
                timeout_seconds=60 * 60 * 2,
            )
        elif "musllinux" in self.platform:
            environ["PYTHON"] = "/opt/python/{}/bin/python".format(
                self.py_version
            )
            environ["PIP"] = "/opt/python/{}/bin/pip".format(self.py_version)
            environ["GRPC_SKIP_PIP_CYTHON_UPGRADE"] = "TRUE"
            environ["GRPC_RUN_AUDITWHEEL_REPAIR"] = "TRUE"
            environ["GRPC_PYTHON_BUILD_WITH_STATIC_LIBSTDCXX"] = "TRUE"
            return create_docker_jobspec(
                self.name,
                "tools/dockerfile/grpc_artifact_python_%s_%s"
                % (self.platform, self.arch),
                "tools/run_tests/artifacts/build_artifact_python.sh",
                environ=environ,
                timeout_seconds=60 * 60 * 2,
            )
        elif self.platform == "windows":
            environ["EXT_COMPILER"] = "msvc"
            # For some reason, the batch script %random% always runs with the same
            # seed. We create a random temp-dir here
            dir = "".join(
                random.choice(string.ascii_uppercase) for _ in range(10)
            )
            return create_jobspec(
                self.name,
                [
                    "tools\\run_tests\\artifacts\\build_artifact_python.bat",
                    self.py_version,
                    "32" if self.arch == "x86" else "64",
                ],
                environ=environ,
                timeout_seconds=45 * 60,
                use_workspace=True,
            )
        else:
            # Fallback: build directly on the host in a workspace.
            environ["PYTHON"] = self.py_version
            environ["SKIP_PIP_INSTALL"] = "TRUE"
            return create_jobspec(
                self.name,
                ["tools/run_tests/artifacts/build_artifact_python.sh"],
                environ=environ,
                timeout_seconds=60 * 60 * 2,
                use_workspace=True,
            )

    def __str__(self):
        return self.name
class RubyArtifact:
    """Builds ruby native gem."""

    def __init__(self, platform, gem_platform, presubmit=False):
        self.platform = platform
        self.gem_platform = gem_platform
        self.name = f"ruby_native_gem_{platform}_{gem_platform}"
        labels = ["artifact", "ruby", platform, gem_platform]
        if presubmit:
            labels.append("presubmit")
        self.labels = labels

    def pre_build_jobspecs(self):
        # Nothing to prepare before the gem build.
        return []

    def build_jobspec(self, inner_jobs=None):
        environ = {}
        if inner_jobs is not None:
            # Forward the requested build parallelism to the gem build script.
            environ["GRPC_RUBY_BUILD_PROCS"] = str(inner_jobs)
        # The ruby build uses docker internally and docker cannot be nested,
        # so this job runs in a dedicated workspace instead.
        return create_jobspec(
            self.name,
            [
                "tools/run_tests/artifacts/build_artifact_ruby.sh",
                self.gem_platform,
            ],
            use_workspace=True,
            timeout_seconds=90 * 60,
            environ=environ,
        )
class PHPArtifact:
    """Builds PHP PECL package"""

    def __init__(self, platform, arch, presubmit=False):
        self.platform = platform
        self.arch = arch
        self.name = f"php_pecl_package_{platform}_{arch}"
        self.labels = ["artifact", "php", platform, arch]
        if presubmit:
            self.labels.append("presubmit")

    def pre_build_jobspecs(self):
        # Nothing to prepare before packaging.
        return []

    def build_jobspec(self, inner_jobs=None):
        # Packing the PECL archive is cheap, so build parallelism is irrelevant.
        del inner_jobs
        if self.platform != "linux":
            # Non-linux platforms run the script directly in a workspace.
            return create_jobspec(
                self.name,
                ["tools/run_tests/artifacts/build_artifact_php.sh"],
                use_workspace=True,
            )
        return create_docker_jobspec(
            self.name,
            "tools/dockerfile/test/php73_zts_debian11_{}".format(self.arch),
            "tools/run_tests/artifacts/build_artifact_php.sh",
        )
class ProtocArtifact:
    """Builds protoc and protoc-plugin artifacts"""

    def __init__(self, platform, arch, presubmit=False):
        # e.g. "protoc_linux_x64"
        self.name = "protoc_%s_%s" % (platform, arch)
        self.platform = platform
        self.arch = arch
        self.labels = ["artifact", "protoc", platform, arch]
        if presubmit:
            self.labels.append("presubmit")

    def pre_build_jobspecs(self):
        # No preparatory jobs are needed.
        return []

    def build_jobspec(self, inner_jobs=None):
        """Returns the jobspec that builds protoc for this platform/arch."""
        environ = {}
        if inner_jobs is not None:
            # set number of parallel jobs when building protoc
            environ["GRPC_PROTOC_BUILD_COMPILER_JOBS"] = str(inner_jobs)
        if self.platform != "windows":
            # Start from empty flags; the branches below append to them.
            environ["CXXFLAGS"] = ""
            environ["LDFLAGS"] = ""
            if self.platform == "linux":
                dockerfile_dir = (
                    "tools/dockerfile/grpc_artifact_centos6_{}".format(
                        self.arch
                    )
                )
                if self.arch == "aarch64":
                    # for aarch64, use a dockcross manylinux image that will
                    # give us both ready to use crosscompiler and sufficient backward compatibility
                    dockerfile_dir = (
                        "tools/dockerfile/grpc_artifact_protoc_aarch64"
                    )
                # Extra linker flags for the Linux build: static libgcc/libstdc++
                # and stripped symbols (-s).
                environ["LDFLAGS"] += " -static-libgcc -static-libstdc++ -s"
                return create_docker_jobspec(
                    self.name,
                    dockerfile_dir,
                    "tools/run_tests/artifacts/build_artifact_protoc.sh",
                    environ=environ,
                )
            else:
                # macOS: build against libc++ with the minimum-version flag.
                environ["CXXFLAGS"] += (
                    " -std=c++14 -stdlib=libc++ %s" % _MACOS_COMPAT_FLAG
                )
                return create_jobspec(
                    self.name,
                    ["tools/run_tests/artifacts/build_artifact_protoc.sh"],
                    environ=environ,
                    timeout_seconds=60 * 60,
                    use_workspace=True,
                )
        else:
            vs_tools_architecture = (
                self.arch
            )  # architecture selector passed to vcvarsall.bat
            environ["ARCHITECTURE"] = vs_tools_architecture
            return create_jobspec(
                self.name,
                ["tools\\run_tests\\artifacts\\build_artifact_protoc.bat"],
                environ=environ,
                use_workspace=True,
            )

    def __str__(self):
        return self.name
def _reorder_targets_for_build_speed(targets):
"""Reorder targets to achieve optimal build speed"""
# ruby artifact build builds multiple artifacts at once, so make sure
# we start building ruby artifacts first, so that they don't end up
# being a long tail once everything else finishes.
return list(
sorted(
targets,
key=lambda target: 0 if target.name.startswith("ruby_") else 1,
)
)
def targets():
    """Gets list of supported targets"""
    # Targets with presubmit=True also run on presubmit CI; the remaining
    # targets only run in the full artifact build. Ruby targets are moved
    # to the front by _reorder_targets_for_build_speed.
    return _reorder_targets_for_build_speed(
        [
            ProtocArtifact("linux", "x64", presubmit=True),
            ProtocArtifact("linux", "x86", presubmit=True),
            ProtocArtifact("linux", "aarch64", presubmit=True),
            ProtocArtifact("macos", "x64", presubmit=True),
            ProtocArtifact("windows", "x64", presubmit=True),
            ProtocArtifact("windows", "x86", presubmit=True),
            PythonArtifact("manylinux2014", "x64", "cp37-cp37m", presubmit=True),
            PythonArtifact("manylinux2014", "x64", "cp38-cp38", presubmit=True),
            PythonArtifact("manylinux2014", "x64", "cp39-cp39"),
            PythonArtifact("manylinux2014", "x64", "cp310-cp310"),
            PythonArtifact("manylinux2014", "x64", "cp311-cp311", presubmit=True),
            PythonArtifact("manylinux2014", "x86", "cp37-cp37m", presubmit=True),
            PythonArtifact("manylinux2014", "x86", "cp38-cp38", presubmit=True),
            PythonArtifact("manylinux2014", "x86", "cp39-cp39"),
            PythonArtifact("manylinux2014", "x86", "cp310-cp310"),
            PythonArtifact("manylinux2014", "x86", "cp311-cp311", presubmit=True),
            PythonArtifact("manylinux2014", "aarch64", "cp37-cp37m", presubmit=True),
            PythonArtifact("manylinux2014", "aarch64", "cp38-cp38", presubmit=True),
            PythonArtifact("manylinux2014", "aarch64", "cp39-cp39"),
            PythonArtifact("manylinux2014", "aarch64", "cp310-cp310"),
            PythonArtifact("manylinux2014", "aarch64", "cp311-cp311"),
            PythonArtifact("linux_extra", "armv7", "cp37-cp37m", presubmit=True),
            PythonArtifact("linux_extra", "armv7", "cp38-cp38"),
            PythonArtifact("linux_extra", "armv7", "cp39-cp39"),
            PythonArtifact("linux_extra", "armv7", "cp310-cp310"),
            PythonArtifact("linux_extra", "armv7", "cp311-cp311", presubmit=True),
            PythonArtifact("musllinux_1_1", "x64", "cp310-cp310"),
            PythonArtifact("musllinux_1_1", "x64", "cp311-cp311", presubmit=True),
            PythonArtifact("musllinux_1_1", "x64", "cp37-cp37m", presubmit=True),
            PythonArtifact("musllinux_1_1", "x64", "cp38-cp38"),
            PythonArtifact("musllinux_1_1", "x64", "cp39-cp39"),
            PythonArtifact("musllinux_1_1", "x86", "cp310-cp310"),
            PythonArtifact("musllinux_1_1", "x86", "cp311-cp311", presubmit=True),
            PythonArtifact("musllinux_1_1", "x86", "cp37-cp37m", presubmit=True),
            PythonArtifact("musllinux_1_1", "x86", "cp38-cp38"),
            PythonArtifact("musllinux_1_1", "x86", "cp39-cp39"),
            PythonArtifact("macos", "x64", "python3.7", presubmit=True),
            PythonArtifact("macos", "x64", "python3.8"),
            PythonArtifact("macos", "x64", "python3.9"),
            PythonArtifact("macos", "x64", "python3.10", presubmit=True),
            PythonArtifact("macos", "x64", "python3.11", presubmit=True),
            PythonArtifact("windows", "x86", "Python37_32bit", presubmit=True),
            PythonArtifact("windows", "x86", "Python38_32bit"),
            PythonArtifact("windows", "x86", "Python39_32bit"),
            PythonArtifact("windows", "x86", "Python310_32bit"),
            PythonArtifact("windows", "x86", "Python311_32bit", presubmit=True),
            PythonArtifact("windows", "x64", "Python37", presubmit=True),
            PythonArtifact("windows", "x64", "Python38"),
            PythonArtifact("windows", "x64", "Python39"),
            PythonArtifact("windows", "x64", "Python310"),
            PythonArtifact("windows", "x64", "Python311", presubmit=True),
            RubyArtifact("linux", "x86-mingw32", presubmit=True),
            RubyArtifact("linux", "x64-mingw32", presubmit=True),
            RubyArtifact("linux", "x64-mingw-ucrt", presubmit=True),
            RubyArtifact("linux", "x86_64-linux", presubmit=True),
            RubyArtifact("linux", "x86-linux", presubmit=True),
            RubyArtifact("linux", "aarch64-linux", presubmit=True),
            RubyArtifact("linux", "x86_64-darwin", presubmit=True),
            RubyArtifact("linux", "arm64-darwin", presubmit=True),
            PHPArtifact("linux", "x64", presubmit=True),
            PHPArtifact("macos", "x64", presubmit=True),
        ]
    )
| 18,851
| 38.193347
| 99
|
py
|
grpc
|
grpc-master/tools/run_tests/artifacts/distribtest_targets.py
|
#!/usr/bin/env python3
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of targets run distribution package tests."""
import os.path
import sys
sys.path.insert(0, os.path.abspath(".."))
import python_utils.jobset as jobset
def create_docker_jobspec(
    name,
    dockerfile_dir,
    shell_command,
    environ=None,
    flake_retries=0,
    timeout_retries=0,
    copy_rel_path=None,
    timeout_seconds=30 * 60,
):
    """Creates jobspec for a distribtest running under docker.

    Args:
      name: distribtest name; used for the job shortname.
      dockerfile_dir: directory containing the Dockerfile for the test image.
      shell_command: command run inside the container.
      environ: optional dict of extra environment variables.
      flake_retries: retries allowed on flaky failures.
      timeout_retries: retries allowed on timeouts.
      copy_rel_path: if set, only this path is copied into the container
        (otherwise the entire repository is cloned).
      timeout_seconds: per-attempt job timeout.

    Returns:
      A jobset.JobSpec running the dockerized distribtest.
    """
    # None sentinel instead of a mutable default dict; copy so the caller's
    # dict is never mutated.
    environ = {} if environ is None else environ.copy()
    # the entire repo will be cloned if copy_rel_path is not set.
    if copy_rel_path:
        environ["RELATIVE_COPY_PATH"] = copy_rel_path
    docker_args = []
    for k, v in list(environ.items()):
        docker_args += ["-e", "%s=%s" % (k, v)]
    docker_env = {
        "DOCKERFILE_DIR": dockerfile_dir,
        "DOCKER_RUN_SCRIPT": "tools/run_tests/dockerize/docker_run.sh",
        "DOCKER_RUN_SCRIPT_COMMAND": shell_command,
    }
    jobspec = jobset.JobSpec(
        cmdline=["tools/run_tests/dockerize/build_and_run_docker.sh"]
        + docker_args,
        environ=docker_env,
        shortname="distribtest.%s" % (name),
        timeout_seconds=timeout_seconds,
        flake_retries=flake_retries,
        timeout_retries=timeout_retries,
    )
    return jobspec
def create_jobspec(
    name,
    cmdline,
    environ=None,
    shell=False,
    flake_retries=0,
    timeout_retries=0,
    use_workspace=False,
    timeout_seconds=10 * 60,
):
    """Creates jobspec for a distribtest running directly on the host.

    Args:
      name: distribtest name; used for the job shortname.
      cmdline: command (as a list) to execute.
      environ: optional dict of extra environment variables.
      shell: run the command through a shell.
      flake_retries: retries allowed on flaky failures.
      timeout_retries: retries allowed on timeouts.
      use_workspace: run in a freshly created git workspace.
      timeout_seconds: per-attempt job timeout.

    Returns:
      A jobset.JobSpec for the test.
    """
    # BUG FIX: the previous code called environ.copy() unconditionally,
    # which raised AttributeError whenever environ was left at its default
    # of None. Normalize None to an empty dict first.
    environ = {} if environ is None else environ.copy()
    if use_workspace:
        environ["WORKSPACE_NAME"] = "workspace_%s" % name
        cmdline = [
            "bash",
            "tools/run_tests/artifacts/run_in_workspace.sh",
        ] + cmdline
    jobspec = jobset.JobSpec(
        cmdline=cmdline,
        environ=environ,
        shortname="distribtest.%s" % (name),
        timeout_seconds=timeout_seconds,
        flake_retries=flake_retries,
        timeout_retries=timeout_retries,
        shell=shell,
    )
    return jobspec
class CSharpDistribTest(object):
    """Tests C# NuGet package"""

    def __init__(
        self,
        platform,
        arch,
        docker_suffix=None,
        use_dotnet_cli=False,
        presubmit=False,
    ):
        self.name = "csharp_%s_%s" % (platform, arch)
        self.platform = platform
        self.arch = arch
        self.docker_suffix = docker_suffix
        self.labels = ["distribtest", "csharp", platform, arch]
        if presubmit:
            self.labels.append("presubmit")
        # Suffix appended to the distrib test script name (see build_jobspec).
        self.script_suffix = ""
        if docker_suffix:
            # The docker flavor becomes part of the test name and labels.
            self.name += "_%s" % docker_suffix
            self.labels.append(docker_suffix)
        if use_dotnet_cli:
            # Use the "dotnet CLI" variant of the distrib test script.
            self.name += "_dotnetcli"
            self.script_suffix = "_dotnetcli"
            self.labels.append("dotnetcli")
        else:
            self.labels.append("olddotnet")

    def pre_build_jobspecs(self):
        # No preparatory jobs are needed.
        return []

    def build_jobspec(self, inner_jobs=None):
        """Returns the jobspec that runs this distribtest (platform-dependent)."""
        del inner_jobs  # arg unused as there is little opportunity for parallelizing whats inside the distribtests
        if self.platform == "linux":
            return create_docker_jobspec(
                self.name,
                "tools/dockerfile/distribtest/csharp_%s_%s"
                % (self.docker_suffix, self.arch),
                "test/distrib/csharp/run_distrib_test%s.sh"
                % self.script_suffix,
                copy_rel_path="test/distrib",
            )
        elif self.platform == "macos":
            return create_jobspec(
                self.name,
                [
                    "test/distrib/csharp/run_distrib_test%s.sh"
                    % self.script_suffix
                ],
                environ={
                    "EXTERNAL_GIT_ROOT": "../../../..",
                    "SKIP_NETCOREAPP21_DISTRIBTEST": "1",
                    "SKIP_NET50_DISTRIBTEST": "1",
                },
                use_workspace=True,
            )
        elif self.platform == "windows":
            # TODO(jtattermusch): re-enable windows distribtest
            return create_jobspec(
                self.name,
                ["bash", "tools/run_tests/artifacts/run_distribtest_csharp.sh"],
                environ={},
                use_workspace=True,
            )
        else:
            raise Exception("Not supported yet.")

    def __str__(self):
        return self.name
class PythonDistribTest(object):
    """Tests Python package"""

    def __init__(
        self, platform, arch, docker_suffix, source=False, presubmit=False
    ):
        self.source = source
        # Source-install tests get a distinct "python_dev" name prefix.
        prefix = "python_dev" if source else "python"
        self.name = f"{prefix}_{platform}_{arch}_{docker_suffix}"
        self.platform = platform
        self.arch = arch
        self.docker_suffix = docker_suffix
        self.labels = ["distribtest", "python", platform, arch, docker_suffix]
        if presubmit:
            self.labels.append("presubmit")

    def pre_build_jobspecs(self):
        # Nothing to prepare.
        return []

    def build_jobspec(self, inner_jobs=None):
        # TODO(jtattermusch): honor inner_jobs arg for this task.
        del inner_jobs
        if self.platform != "linux":
            raise Exception("Not supported yet.")
        # Source installs build from an sdist; binary installs use the wheel.
        if self.source:
            flavor = "python_dev"
            script = "test/distrib/python/run_source_distrib_test.sh"
        else:
            flavor = "python"
            script = "test/distrib/python/run_binary_distrib_test.sh"
        return create_docker_jobspec(
            self.name,
            "tools/dockerfile/distribtest/%s_%s_%s"
            % (flavor, self.docker_suffix, self.arch),
            script,
            copy_rel_path="test/distrib",
        )

    def __str__(self):
        return self.name
class RubyDistribTest(object):
    """Tests Ruby package"""

    def __init__(
        self,
        platform,
        arch,
        docker_suffix,
        ruby_version=None,
        source=False,
        presubmit=False,
    ):
        self.package_type = "source" if source else "binary"
        version_label = ruby_version or "unspecified"
        self.name = (
            f"ruby_{platform}_{arch}_{docker_suffix}"
            f"_version_{version_label}_package_type_{self.package_type}"
        )
        self.platform = platform
        self.arch = arch
        self.docker_suffix = docker_suffix
        self.ruby_version = ruby_version
        self.labels = ["distribtest", "ruby", platform, arch, docker_suffix]
        if presubmit:
            self.labels.append("presubmit")

    def pre_build_jobspecs(self):
        # Nothing to prepare.
        return []

    def build_jobspec(self, inner_jobs=None):
        # TODO(jtattermusch): honor inner_jobs arg for this task.
        del inner_jobs
        if self.platform != "linux":
            raise Exception("Not supported yet.")
        # Gem filenames use a different architecture naming scheme.
        gem_arch = {"x64": "x86_64", "x86": "x86"}[self.arch]
        dockerfile = "tools/dockerfile/distribtest/ruby_%s_%s" % (
            self.docker_suffix,
            self.arch,
        )
        if self.ruby_version is not None:
            # Version-pinned images live in per-version dockerfile dirs.
            dockerfile += "_%s" % self.ruby_version
        return create_docker_jobspec(
            self.name,
            dockerfile,
            "test/distrib/ruby/run_distrib_test.sh %s %s %s"
            % (gem_arch, self.platform, self.package_type),
            copy_rel_path="test/distrib",
        )

    def __str__(self):
        return self.name
class PHP7DistribTest(object):
    """Tests PHP7 package"""

    def __init__(self, platform, arch, docker_suffix=None, presubmit=False):
        self.platform = platform
        self.arch = arch
        self.docker_suffix = docker_suffix
        # NOTE: docker_suffix may be None, in which case the literal string
        # "None" ends up in the name (matches historical behavior on macos).
        self.name = "php7_%s_%s_%s" % (platform, arch, docker_suffix)
        labels = ["distribtest", "php", "php7", platform, arch]
        if presubmit:
            labels.append("presubmit")
        if docker_suffix:
            labels.append(docker_suffix)
        self.labels = labels

    def pre_build_jobspecs(self):
        # Nothing to prepare.
        return []

    def build_jobspec(self, inner_jobs=None):
        # TODO(jtattermusch): honor inner_jobs arg for this task.
        del inner_jobs
        if self.platform == "macos":
            return create_jobspec(
                self.name,
                ["test/distrib/php/run_distrib_test_macos.sh"],
                environ={"EXTERNAL_GIT_ROOT": "../../../.."},
                timeout_seconds=20 * 60,
                use_workspace=True,
            )
        if self.platform == "linux":
            return create_docker_jobspec(
                self.name,
                "tools/dockerfile/distribtest/php7_%s_%s"
                % (self.docker_suffix, self.arch),
                "test/distrib/php/run_distrib_test.sh",
                copy_rel_path="test/distrib",
            )
        raise Exception("Not supported yet.")

    def __str__(self):
        return self.name
class CppDistribTest(object):
    """Tests Cpp make install by building examples."""

    def __init__(
        self, platform, arch, docker_suffix=None, testcase=None, presubmit=False
    ):
        # On linux the docker flavor is part of the test name.
        if platform == "linux":
            self.name = "cpp_%s_%s_%s_%s" % (
                platform,
                arch,
                docker_suffix,
                testcase,
            )
        else:
            self.name = "cpp_%s_%s_%s" % (platform, arch, testcase)
        self.platform = platform
        self.arch = arch
        self.docker_suffix = docker_suffix
        self.testcase = testcase
        self.labels = [
            "distribtest",
            "cpp",
            platform,
            arch,
            testcase,
        ]
        if presubmit:
            self.labels.append("presubmit")
        if docker_suffix:
            self.labels.append(docker_suffix)

    def pre_build_jobspecs(self):
        # No preparatory jobs are needed.
        return []

    def build_jobspec(self, inner_jobs=None):
        """Returns the jobspec that runs this C++ distribtest."""
        environ = {}
        if inner_jobs is not None:
            # set number of parallel jobs for the C++ build
            environ["GRPC_CPP_DISTRIBTEST_BUILD_COMPILER_JOBS"] = str(
                inner_jobs
            )
        if self.platform == "linux":
            return create_docker_jobspec(
                self.name,
                "tools/dockerfile/distribtest/cpp_%s_%s"
                % (self.docker_suffix, self.arch),
                "test/distrib/cpp/run_distrib_test_%s.sh" % self.testcase,
                timeout_seconds=45 * 60,
                # BUG FIX: environ (carrying the compiler-jobs setting built
                # above) was previously computed but never passed on, so
                # inner_jobs had no effect.
                environ=environ,
            )
        elif self.platform == "windows":
            return create_jobspec(
                self.name,
                ["test\\distrib\\cpp\\run_distrib_test_%s.bat" % self.testcase],
                # BUG FIX: was environ={}, silently dropping inner_jobs.
                environ=environ,
                timeout_seconds=45 * 60,
                use_workspace=True,
            )
        else:
            raise Exception("Not supported yet.")

    def __str__(self):
        return self.name
def targets():
    """Gets list of supported targets"""
    # Targets with presubmit=True also run on presubmit CI; the rest only
    # run in the full distribtest suite.
    return [
        # C++
        CppDistribTest("linux", "x64", "debian10", "cmake", presubmit=True),
        CppDistribTest("linux", "x64", "debian10", "cmake_as_submodule", presubmit=True),
        CppDistribTest("linux", "x64", "debian10", "cmake_as_externalproject", presubmit=True),
        CppDistribTest("linux", "x64", "debian10", "cmake_fetchcontent", presubmit=True),
        CppDistribTest("linux", "x64", "debian10", "cmake_module_install", presubmit=True),
        CppDistribTest("linux", "x64", "debian10", "cmake_pkgconfig", presubmit=True),
        CppDistribTest("linux", "x64", "debian10_aarch64_cross", "cmake_aarch64_cross", presubmit=True),
        CppDistribTest("windows", "x86", testcase="cmake", presubmit=True),
        CppDistribTest("windows", "x86", testcase="cmake_as_externalproject", presubmit=True),
        # C#
        CSharpDistribTest("linux", "x64", "debian10", use_dotnet_cli=True, presubmit=True),
        CSharpDistribTest("linux", "x64", "ubuntu1604", use_dotnet_cli=True),
        CSharpDistribTest("linux", "x64", "alpine", use_dotnet_cli=True, presubmit=True),
        CSharpDistribTest("linux", "x64", "dotnet31", use_dotnet_cli=True, presubmit=True),
        CSharpDistribTest("linux", "x64", "dotnet5", use_dotnet_cli=True, presubmit=True),
        CSharpDistribTest("macos", "x64", use_dotnet_cli=True, presubmit=True),
        CSharpDistribTest("windows", "x86", presubmit=True),
        CSharpDistribTest("windows", "x64", presubmit=True),
        # Python
        PythonDistribTest("linux", "x64", "buster", presubmit=True),
        PythonDistribTest("linux", "x86", "buster", presubmit=True),
        PythonDistribTest("linux", "x64", "fedora34"),
        PythonDistribTest("linux", "x64", "arch"),
        PythonDistribTest("linux", "x64", "alpine"),
        PythonDistribTest("linux", "x64", "ubuntu2004"),
        PythonDistribTest("linux", "aarch64", "python38_buster", presubmit=True),
        PythonDistribTest("linux", "x64", "alpine3.7", source=True, presubmit=True),
        PythonDistribTest("linux", "x64", "buster", source=True, presubmit=True),
        PythonDistribTest("linux", "x86", "buster", source=True, presubmit=True),
        PythonDistribTest("linux", "x64", "fedora34", source=True),
        PythonDistribTest("linux", "x64", "arch", source=True),
        PythonDistribTest("linux", "x64", "ubuntu2004", source=True),
        # Ruby
        RubyDistribTest("linux", "x64", "debian10", ruby_version="ruby_2_6", source=True, presubmit=True),
        RubyDistribTest("linux", "x64", "debian10", ruby_version="ruby_2_7", presubmit=True),
        RubyDistribTest("linux", "x64", "centos7"),
        RubyDistribTest("linux", "x64", "ubuntu1604"),
        RubyDistribTest("linux", "x64", "ubuntu1804", presubmit=True),
        # PHP7
        PHP7DistribTest("linux", "x64", "debian10", presubmit=True),
        PHP7DistribTest("macos", "x64", presubmit=True),
    ]
| 15,363
| 31.345263
| 115
|
py
|
grpc
|
grpc-master/tools/run_tests/artifacts/package_targets.py
|
#!/usr/bin/env python3
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of targets to build distribution packages."""
import os.path
import sys
sys.path.insert(0, os.path.abspath(".."))
import python_utils.jobset as jobset
def create_docker_jobspec(
    name,
    dockerfile_dir,
    shell_command,
    environ=None,
    flake_retries=0,
    timeout_retries=0,
):
    """Creates jobspec for a package-build task running under docker.

    Args:
      name: package name; used for the job shortname.
      dockerfile_dir: directory containing the Dockerfile for the build image.
      shell_command: command run inside the container.
      environ: optional dict of extra environment variables.
      flake_retries: retries allowed on flaky failures.
      timeout_retries: retries allowed on timeouts.

    Returns:
      A jobset.JobSpec running the dockerized package build.
    """
    # None sentinel instead of a mutable default dict; copy so the caller's
    # dict is never mutated.
    environ = {} if environ is None else environ.copy()
    docker_args = []
    for k, v in list(environ.items()):
        docker_args += ["-e", "%s=%s" % (k, v)]
    docker_env = {
        "DOCKERFILE_DIR": dockerfile_dir,
        "DOCKER_RUN_SCRIPT": "tools/run_tests/dockerize/docker_run.sh",
        "DOCKER_RUN_SCRIPT_COMMAND": shell_command,
        "OUTPUT_DIR": "artifacts",
    }
    jobspec = jobset.JobSpec(
        cmdline=["tools/run_tests/dockerize/build_and_run_docker.sh"]
        + docker_args,
        environ=docker_env,
        shortname="build_package.%s" % (name),
        timeout_seconds=30 * 60,
        flake_retries=flake_retries,
        timeout_retries=timeout_retries,
    )
    return jobspec
def create_jobspec(
    name,
    cmdline,
    environ=None,
    cwd=None,
    shell=False,
    flake_retries=0,
    timeout_retries=0,
    cpu_cost=1.0,
):
    """Creates jobspec for a package-build task running directly on the host.

    environ is forwarded as-is to jobset.JobSpec (may be None).
    """
    return jobset.JobSpec(
        cmdline=cmdline,
        environ=environ,
        cwd=cwd,
        shortname="build_package.%s" % (name),
        timeout_seconds=10 * 60,
        flake_retries=flake_retries,
        timeout_retries=timeout_retries,
        cpu_cost=cpu_cost,
        shell=shell,
    )
class CSharpPackage:
    """Builds C# packages."""

    def __init__(self, platform):
        self.platform = platform
        self.name = "csharp_package_nuget_%s" % platform
        self.labels = ["package", "csharp", platform, "nuget"]

    def pre_build_jobspecs(self):
        # Nothing to prepare.
        return []

    def build_jobspec(self, inner_jobs=None):
        # Packaging offers little opportunity for parallelization.
        del inner_jobs
        environ = {
            "GRPC_CSHARP_BUILD_SINGLE_PLATFORM_NUGET": os.getenv(
                "GRPC_CSHARP_BUILD_SINGLE_PLATFORM_NUGET", ""
            )
        }
        build_script = "src/csharp/build_nuget.sh"
        if self.platform != "linux":
            # Non-linux builds run on the host and need the repo root exposed.
            repo_root = os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "..", "..", ".."
            )
            environ["EXTERNAL_GIT_ROOT"] = repo_root
            return create_jobspec(
                self.name, ["bash", build_script], environ=environ
            )
        return create_docker_jobspec(
            self.name,
            "tools/dockerfile/test/csharp_debian11_x64",
            build_script,
            environ=environ,
        )

    def __str__(self):
        return self.name
class RubyPackage:
    """Collects ruby gems created in the artifact phase"""

    def __init__(self):
        self.name = "ruby_package"
        self.labels = ["package", "ruby", "linux"]

    def pre_build_jobspecs(self):
        # Nothing to prepare.
        return []

    def build_jobspec(self, inner_jobs=None):
        # This step only collects preexisting artifacts; parallelism is moot.
        del inner_jobs
        return create_docker_jobspec(
            self.name,
            "tools/dockerfile/grpc_artifact_centos6_x64",
            "tools/run_tests/artifacts/build_package_ruby.sh",
        )
class PythonPackage:
    """Collects python eggs and wheels created in the artifact phase"""

    def __init__(self):
        self.name = "python_package"
        self.labels = ["package", "python", "linux"]

    def pre_build_jobspecs(self):
        # Nothing to prepare.
        return []

    def build_jobspec(self, inner_jobs=None):
        # This step only collects preexisting artifacts; parallelism is moot.
        del inner_jobs
        # The python package build does very little, so virtually any image
        # with a new-enough python works; one of the artifact-build images
        # is reused here.
        return create_docker_jobspec(
            self.name,
            "tools/dockerfile/grpc_artifact_python_manylinux2014_x64",
            "tools/run_tests/artifacts/build_package_python.sh",
            environ={"PYTHON": "/opt/python/cp39-cp39/bin/python"},
        )
class PHPPackage:
    """Copy PHP PECL package artifact"""

    def __init__(self):
        self.name = "php_package"
        self.labels = ["package", "php", "linux"]

    def pre_build_jobspecs(self):
        # Nothing to prepare.
        return []

    def build_jobspec(self, inner_jobs=None):
        # This step only collects preexisting artifacts; parallelism is moot.
        del inner_jobs
        return create_docker_jobspec(
            self.name,
            "tools/dockerfile/grpc_artifact_centos6_x64",
            "tools/run_tests/artifacts/build_package_php.sh",
        )
def targets():
    """Gets list of supported targets"""
    # One package-collection job per language ecosystem; C# builds a
    # per-platform NuGet on each of linux/macos/windows.
    return [
        CSharpPackage("linux"),
        CSharpPackage("macos"),
        CSharpPackage("windows"),
        RubyPackage(),
        PythonPackage(),
        PHPPackage(),
    ]
| 5,717
| 28.474227
| 87
|
py
|
grpc
|
grpc-master/tools/run_tests/artifacts/__init__.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 577
| 40.285714
| 74
|
py
|
grpc
|
grpc-master/tools/gcp/utils/big_query_utils.py
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import json
import uuid
from apiclient import discovery
from apiclient.errors import HttpError
import httplib2
from oauth2client.client import GoogleCredentials
# 30 days in milliseconds
# (default partition expiration used by create_partitioned_table)
_EXPIRATION_MS = 30 * 24 * 60 * 60 * 1000
# Number of retries passed to BigQuery API request execute() calls.
NUM_RETRIES = 3
def create_big_query():
    """Authenticates with cloud platform and gets a BigQuery service object"""
    creds = GoogleCredentials.get_application_default()
    # cache_discovery=False disables the discovery-document cache
    # (presumably to avoid cache-related warnings/errors with these
    # credentials — TODO confirm).
    return discovery.build(
        "bigquery", "v2", credentials=creds, cache_discovery=False
    )
def create_dataset(biq_query, project_id, dataset_id):
    """Create a BigQuery dataset, tolerating one that already exists.

    Returns True on success (including the already-exists case), False on
    any other HTTP error.
    """
    body = {
        "datasetReference": {"projectId": project_id, "datasetId": dataset_id}
    }
    try:
        biq_query.datasets().insert(projectId=project_id, body=body).execute(
            num_retries=NUM_RETRIES
        )
    except HttpError as http_error:
        # 409 means the dataset is already there; treat that as success.
        if http_error.resp.status != 409:
            # Note: For more debugging info, print "http_error.content"
            print(
                "Error in creating dataset: %s. Err: %s"
                % (dataset_id, http_error)
            )
            return False
        print("Warning: The dataset %s already exists" % dataset_id)
    return True
def create_table(
    big_query, project_id, dataset_id, table_id, table_schema, description
):
    """Create a non-partitioned table from (name, type, description) tuples.

    Thin wrapper over create_table2 that converts the tuple-based schema
    into the field-dictionary form the BigQuery API expects.
    """
    fields = []
    for field_name, field_type, field_description in table_schema:
        fields.append(
            {
                "name": field_name,
                "type": field_type,
                "description": field_description,
            }
        )
    return create_table2(
        big_query, project_id, dataset_id, table_id, fields, description
    )
def create_partitioned_table(
    big_query,
    project_id,
    dataset_id,
    table_id,
    table_schema,
    description,
    partition_type="DAY",
    expiration_ms=_EXPIRATION_MS,
):
    """Create a time-partitioned table from (name, type, description) tuples.

    By default a date-partitioned table is created and each partition
    expires 30 days after it was last modified.
    """
    fields = [
        {"name": n, "type": t, "description": d} for n, t, d in table_schema
    ]
    return create_table2(
        big_query,
        project_id,
        dataset_id,
        table_id,
        fields,
        description,
        partition_type,
        expiration_ms,
    )
def create_table2(
    big_query,
    project_id,
    dataset_id,
    table_id,
    fields_schema,
    description,
    partition_type=None,
    expiration_ms=None,
):
    """Create a table from an already-built field-dictionary schema.

    When both partition_type and expiration_ms are given, the table is
    created time-partitioned. Returns True on success or when the table
    already exists; False on any other HTTP error.
    """
    body = {
        "description": description,
        "schema": {"fields": fields_schema},
        "tableReference": {
            "datasetId": dataset_id,
            "projectId": project_id,
            "tableId": table_id,
        },
    }
    if partition_type and expiration_ms:
        body["timePartitioning"] = {
            "type": partition_type,
            "expirationMs": expiration_ms,
        }
    try:
        response = (
            big_query.tables()
            .insert(projectId=project_id, datasetId=dataset_id, body=body)
            .execute(num_retries=NUM_RETRIES)
        )
        print(
            'Successfully created %s "%s"'
            % (response["kind"], response["id"])
        )
    except HttpError as http_error:
        # 409 means the table is already there; treat that as success.
        if http_error.resp.status != 409:
            print(
                "Error in creating table: %s. Err: %s" % (table_id, http_error)
            )
            return False
        print("Warning: Table %s already exists" % table_id)
    return True
def patch_table(big_query, project_id, dataset_id, table_id, fields_schema):
    """Patch an existing table's schema in place.

    Args:
        big_query: BigQuery service object from create_big_query().
        project_id: GCP project owning the dataset.
        dataset_id: dataset containing the table.
        table_id: table to patch.
        fields_schema: list of field dictionaries ({"name", "type", ...}).

    Returns:
        True when the patch request succeeded, False otherwise.
    """
    is_success = True
    body = {
        "schema": {"fields": fields_schema},
        "tableReference": {
            "datasetId": dataset_id,
            "projectId": project_id,
            "tableId": table_id,
        },
    }
    try:
        table_req = big_query.tables().patch(
            projectId=project_id,
            datasetId=dataset_id,
            tableId=table_id,
            body=body,
        )
        res = table_req.execute(num_retries=NUM_RETRIES)
        print('Successfully patched %s "%s"' % (res["kind"], res["id"]))
    except HttpError as http_error:
        # Fixed: this message previously said "creating", which made failed
        # patches indistinguishable from failed table creations in the logs.
        print("Error in patching table: %s. Err: %s" % (table_id, http_error))
        is_success = False
    return is_success
def insert_rows(big_query, project_id, dataset_id, table_id, rows_list):
    """Stream rows into a table via tabledata().insertAll.

    Returns True only when the API call succeeds AND the response reports
    no per-row insert errors.
    """
    try:
        response = (
            big_query.tabledata()
            .insertAll(
                projectId=project_id,
                datasetId=dataset_id,
                tableId=table_id,
                body={"rows": rows_list},
            )
            .execute(num_retries=NUM_RETRIES)
        )
    except HttpError as http_error:
        print("Error inserting rows to the table %s" % table_id)
        print("Error message: %s" % http_error)
        return False
    # A 200 response can still carry per-row failures.
    if response.get("insertErrors", None):
        print("Error inserting rows! Response: %s" % response)
        return False
    return True
def sync_query_job(big_query, project_id, query, timeout=5000):
    """Run a synchronous BigQuery query job.

    Returns the jobs().query response dictionary, or None when the request
    failed.
    """
    request_body = {"query": query, "timeoutMs": timeout}
    try:
        return (
            big_query.jobs()
            .query(projectId=project_id, body=request_body)
            .execute(num_retries=NUM_RETRIES)
        )
    except HttpError as http_error:
        print("Query execute job failed with error: %s" % http_error)
        print(http_error.content)
    return None
# List of (column name, column type, description) tuples
def make_row(unique_row_id, row_values_dict):
    """Wrap a column-name -> value dict in the insertAll row envelope.

    The insertId lets BigQuery de-duplicate retried inserts.
    """
    return {
        "insertId": unique_row_id,
        "json": row_values_dict,
    }
| 6,529
| 27.893805
| 87
|
py
|
grpc
|
grpc-master/tools/buildgen/_utils.py
|
#!/usr/bin/env python3
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for build file generation scripts."""
import importlib.util
import os
import sys
import types
from typing import Any, List, Mapping, Union
def import_python_module(path: str) -> types.ModuleType:
    """Load the Python file at *path* and return it as a module object.

    The module is also registered in sys.modules under its bare file name
    (without the ".py" suffix) so subsequent imports can resolve it.
    """
    module_name = os.path.basename(path).replace(".py", "")
    module_spec = importlib.util.spec_from_file_location(module_name, path)
    loaded_module = importlib.util.module_from_spec(module_spec)
    sys.modules[module_name] = loaded_module
    module_spec.loader.exec_module(loaded_module)
    return loaded_module
class Bunch(dict):
    """Dictionary whose keys are also reachable as attributes."""

    def __init__(self, d: Mapping):
        super().__init__(d)
        # Mirror the mapping into the instance dict to enable dot access.
        self.__dict__.update(d)
def to_bunch(var: Any) -> Any:
    """Recursively convert dicts (including nested ones) into Bunch objects.

    Lists are rebuilt element-wise; scalars pass through unchanged.
    """
    if isinstance(var, list):
        return [to_bunch(element) for element in var]
    if not isinstance(var, dict):
        return var
    converted = {}
    for key, value in list(var.items()):
        if isinstance(value, (list, dict)):
            value = to_bunch(value)
        converted[key] = value
    return Bunch(converted)
def merge_json(dst: Union[Mapping, List], add: Union[Mapping, List]) -> None:
    """Recursively merge *add* into *dst* (both dicts, or both lists).

    Keys already present in dst that start with "#" are left untouched;
    other shared keys are merged recursively. Lists are concatenated.
    Raises TypeError when the two arguments have incompatible kinds.
    """
    if isinstance(dst, dict) and isinstance(add, dict):
        for key, value in list(add.items()):
            if key not in dst:
                dst[key] = value
            elif not key.startswith("#"):
                merge_json(dst[key], value)
    elif isinstance(dst, list) and isinstance(add, list):
        dst.extend(add)
    else:
        raise TypeError(
            "Tried to merge incompatible objects %s %s\n\n%r\n\n%r"
            % (type(dst).__name__, type(add).__name__, dst, add)
        )
| 2,390
| 31.310811
| 77
|
py
|
grpc
|
grpc-master/tools/buildgen/extract_metadata_from_bazel_xml.py
|
#!/usr/bin/env python3
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to extract build metadata from bazel BUILD.
# To avoid having two sources of truth for the build metadata (build
# targets, source files, header files etc.), this script analyzes the contents
# of bazel BUILD files and generates a YAML file (currently called
# build_autogenerated.yaml). The format and semantics of the generated YAML files
# is chosen to match the format of a "build.yaml" file, which used
# to be build the source of truth for gRPC build before bazel became
# the primary build system.
# A good basic overview of the "build.yaml" format is available here:
# https://github.com/grpc/grpc/blob/master/templates/README.md. Note that
# while useful as an overview, the doc does not act as formal spec
# (formal spec does not exist in fact) and the doc can be incomplete,
# inaccurate or slightly out of date.
# TODO(jtattermusch): In the future we want to get rid of the legacy build.yaml
# format entirely or simplify it to a point where it becomes self-explanatory
# and doesn't need any detailed documentation.
import collections
import os
import subprocess
from typing import Any, Dict, Iterable, List, Optional
import xml.etree.ElementTree as ET
import build_cleaner
# Type aliases for the build-metadata dictionaries this script passes around.
# (These aliases were previously declared twice back-to-back; the duplicate
# block has been removed.)
BuildMetadata = Dict[str, Any]
BuildDict = Dict[str, BuildMetadata]
BuildYaml = Dict[str, Any]
class ExternalProtoLibrary:
    """Metadata about an external proto library vendored into the gRPC tree.

    Attributes:
        destination: Relative path where this proto library should live,
            preferably matching the git submodule path.
        proto_prefix: Prefix to remove so generated proto imports resolve
            correctly; see https://github.com/grpc/grpc/pull/25272.
        urls: Download URLs; filled in from bazel build metadata.
        hash: Hash of the downloaded archive (from bazel metadata).
        strip_prefix: Path stripped from the extracted archive, as in
            bazel's http_archive (from bazel metadata).
    """

    def __init__(
        self, destination, proto_prefix, urls=None, hash="", strip_prefix=""
    ):
        self.destination = destination
        self.proto_prefix = proto_prefix
        # Avoid a shared mutable default: each instance gets its own list.
        self.urls = [] if urls is None else urls
        self.hash = hash
        self.strip_prefix = strip_prefix
# Maps a bazel workspace name (as it appears in "@workspace//..." labels) to
# the metadata describing where that workspace's .proto files are vendored
# inside the gRPC source tree.
EXTERNAL_PROTO_LIBRARIES = {
    "envoy_api": ExternalProtoLibrary(
        destination="third_party/envoy-api",
        proto_prefix="third_party/envoy-api/",
    ),
    "com_google_googleapis": ExternalProtoLibrary(
        destination="third_party/googleapis",
        proto_prefix="third_party/googleapis/",
    ),
    "com_github_cncf_udpa": ExternalProtoLibrary(
        destination="third_party/xds", proto_prefix="third_party/xds/"
    ),
    "opencensus_proto": ExternalProtoLibrary(
        destination="third_party/opencensus-proto/src",
        proto_prefix="third_party/opencensus-proto/src/",
    ),
}
def _maybe_get_internal_path(name: str) -> Optional[str]:
    """Return the external proto workspace key a label belongs to, if any.

    A label like "@envoy_api//..." maps to "envoy_api"; labels that do not
    start with a known external workspace yield None.
    """
    matches = (
        key for key in EXTERNAL_PROTO_LIBRARIES if name.startswith("@" + key)
    )
    return next(matches, None)
def _bazel_query_xml_tree(query: str) -> ET.Element:
    """Run "bazel query" with XML output and return the parsed XML tree."""
    bazel_cmd = [
        "tools/bazel",
        "query",
        "--noimplicit_deps",
        "--output",
        "xml",
        query,
    ]
    return ET.fromstring(subprocess.check_output(bazel_cmd))
def _rule_dict_from_xml_node(rule_xml_node):
"""Converts XML node representing a rule (obtained from "bazel query --output xml") to a dictionary that contains all the metadata we will need."""
result = {
"class": rule_xml_node.attrib.get("class"),
"name": rule_xml_node.attrib.get("name"),
"srcs": [],
"hdrs": [],
"deps": [],
"data": [],
"tags": [],
"args": [],
"generator_function": None,
"size": None,
"flaky": False,
"actual": None, # the real target name for aliases
}
for child in rule_xml_node:
# all the metadata we want is stored under "list" tags
if child.tag == "list":
list_name = child.attrib["name"]
if list_name in ["srcs", "hdrs", "deps", "data", "tags", "args"]:
result[list_name] += [item.attrib["value"] for item in child]
if child.tag == "string":
string_name = child.attrib["name"]
if string_name in ["generator_function", "size"]:
result[string_name] = child.attrib["value"]
if child.tag == "boolean":
bool_name = child.attrib["name"]
if bool_name in ["flaky"]:
result[bool_name] = child.attrib["value"] == "true"
if child.tag == "label":
# extract actual name for alias rules
label_name = child.attrib["name"]
if label_name in ["actual"]:
actual_name = child.attrib.get("value", None)
if actual_name:
result["actual"] = actual_name
# HACK: since we do a lot of transitive dependency scanning,
# make it seem that the actual name is a dependency of the alias rule
# (aliases don't have dependencies themselves)
result["deps"].append(actual_name)
return result
def _extract_rules_from_bazel_xml(xml_tree):
    """Collect interesting rule dicts from a parsed "bazel query" XML tree.

    Only rule classes relevant to the build generator are kept; a duplicate
    rule name is treated as an error.
    """
    INTERESTING_CLASSES = (
        "cc_library",
        "cc_binary",
        "cc_test",
        "cc_proto_library",
        "cc_proto_gen_validate",
        "proto_library",
        "upb_proto_library",
        "upb_proto_reflection_library",
        "alias",
    )
    rules = {}
    for child in xml_tree:
        if child.tag != "rule":
            continue
        rule_dict = _rule_dict_from_xml_node(child)
        if rule_dict["class"] not in INTERESTING_CLASSES:
            continue
        rule_name = rule_dict["name"]
        if rule_name in rules:
            raise Exception("Rule %s already present" % rule_name)
        rules[rule_name] = rule_dict
    return rules
def _get_bazel_label(target_name: str) -> str:
if target_name.startswith("@"):
return target_name
if ":" in target_name:
return "//%s" % target_name
else:
return "//:%s" % target_name
def _extract_source_file_path(label: str) -> str:
"""Gets relative path to source file from bazel deps listing"""
if label.startswith("//"):
label = label[len("//") :]
# labels in form //:src/core/lib/surface/call_test_only.h
if label.startswith(":"):
label = label[len(":") :]
# labels in form //test/core/util:port.cc
label = label.replace(":", "/")
return label
def _extract_public_headers(bazel_rule: BuildMetadata) -> List[str]:
    """Return sorted public headers (those under include/) of a bazel rule."""
    return sorted(
        _extract_source_file_path(hdr)
        for hdr in bazel_rule["hdrs"]
        if hdr.startswith("//:include/") and hdr.endswith(".h")
    )
def _extract_nonpublic_headers(bazel_rule: BuildMetadata) -> List[str]:
    """Return sorted non-public headers (not under include/) of a bazel rule."""
    return sorted(
        _extract_source_file_path(hdr)
        for hdr in bazel_rule["hdrs"]
        if hdr.startswith("//")
        and not hdr.startswith("//:include/")
        and hdr.endswith(".h")
    )
def _extract_sources(bazel_rule: BuildMetadata) -> List[str]:
    """Return sorted source files (.c/.cc/.proto) referenced by a bazel rule.

    Local labels become repo-relative paths. Labels from known external
    proto workspaces are remapped to the directory where that workspace is
    vendored; sources from unknown external workspaces are dropped.
    """
    collected = []
    for src in bazel_rule["srcs"]:
        if not (
            src.endswith(".cc") or src.endswith(".c") or src.endswith(".proto")
        ):
            continue
        if src.startswith("//"):
            # This source file is local to gRPC.
            collected.append(_extract_source_file_path(src))
            continue
        # External source: translate the "@REPO_NAME//" prefix to a valid
        # path prefix. The label/path mapping is not available in BUILD
        # files, so the workspace name has to be checked here.
        workspace = _maybe_get_internal_path(src)
        if workspace is not None:
            vendored_prefix = EXTERNAL_PROTO_LIBRARIES[workspace].proto_prefix
            collected.append(
                src.replace("@%s//" % workspace, vendored_prefix).replace(
                    ":", "/"
                )
            )
    return sorted(collected)
def _extract_deps(
    bazel_rule: BuildMetadata, bazel_rules: BuildDict
) -> List[str]:
    """Return the sorted direct dependencies of a bazel rule.

    Entries in "srcs" that are not actual source files but labels of other
    bazel targets (e.g. :pkg_cc_proto_validate) count as dependencies too,
    which bazel generally allows.
    """
    dependencies = set(bazel_rule["deps"])
    for src in bazel_rule["srcs"]:
        if not src.endswith((".cc", ".c", ".proto")) and src in bazel_rules:
            # This "source" is really a label of another bazel target.
            dependencies.add(src)
    return sorted(dependencies)
def _create_target_from_bazel_rule(
    target_name: str, bazel_rules: BuildDict
) -> BuildMetadata:
    """Create a build.yaml-like target definition from bazel metadata.

    The "_"-prefixed fields keep the raw per-rule data straight from bazel;
    the public fields carry the collapsed values stored on the rule earlier
    (the "_COLLAPSED_*" entries).
    """
    bazel_rule = bazel_rules[_get_bazel_label(target_name)]
    return {
        "name": target_name,
        "_PUBLIC_HEADERS_BAZEL": _extract_public_headers(bazel_rule),
        "_HEADERS_BAZEL": _extract_nonpublic_headers(bazel_rule),
        "_SRC_BAZEL": _extract_sources(bazel_rule),
        "_DEPS_BAZEL": _extract_deps(bazel_rule, bazel_rules),
        "public_headers": bazel_rule["_COLLAPSED_PUBLIC_HEADERS"],
        "headers": bazel_rule["_COLLAPSED_HEADERS"],
        "src": bazel_rule["_COLLAPSED_SRCS"],
        "deps": bazel_rule["_COLLAPSED_DEPS"],
    }
def _external_dep_name_from_bazel_dependency(bazel_dep: str) -> Optional[str]:
"""Returns name of dependency if external bazel dependency is provided or None"""
if bazel_dep.startswith("@com_google_absl//"):
# special case for add dependency on one of the absl libraries (there is not just one absl library)
prefixlen = len("@com_google_absl//")
return bazel_dep[prefixlen:]
elif bazel_dep == "//external:upb_lib":
return "upb"
elif bazel_dep == "//external:benchmark":
return "benchmark"
elif bazel_dep == "//external:libssl":
return "libssl"
else:
# all the other external deps such as protobuf, cares, zlib
# don't need to be listed explicitly, they are handled automatically
# by the build system (make, cmake)
return None
def _compute_transitive_metadata(
    rule_name: str, bazel_rules: Any, bazel_label_to_dep_name: Dict[str, str]
) -> None:
    """Computes the final build metadata for Bazel target with rule_name.

    Mutates bazel_rules[rule_name] in place, recursing depth-first through
    its dependencies (the "_PROCESSING_DONE" flag prevents re-processing).

    The dependencies that will appear on the deps list are:
    * Public build targets, including binaries and tests;
    * External targets, like absl, re2.

    All other intermediate dependencies will be merged, which means their
    source file, headers, etc. will be collected into one build target. This
    step of processing will greatly reduce the complexity of the generated
    build specifications for other build systems, like CMake, Make, setuptools.

    The final build metadata (stored as keys on the rule dict) are:
    * _TRANSITIVE_DEPS: all the transitive dependencies including intermediate
      targets;
    * _COLLAPSED_DEPS: dependencies that fit the requirement above; duplicates
      removed, shortest possible list, in alphabetical order;
    * _COLLAPSED_SRCS: the merged source files;
    * _COLLAPSED_PUBLIC_HEADERS: the merged public headers;
    * _COLLAPSED_HEADERS: the merged non-public headers;
    * _EXCLUDE_DEPS: intermediate targets to exclude when performing collapsing
      of sources and dependencies.

    For the collapsed_deps, the algorithm improved cases like:
      The result in the past:
        end2end_tests -> [grpc_test_util, grpc, gpr, address_sorting, upb]
        grpc_test_util -> [grpc, gpr, address_sorting, upb, ...]
        grpc -> [gpr, address_sorting, upb, ...]
      The result of the algorithm:
        end2end_tests -> [grpc_test_util]
        grpc_test_util -> [grpc]
        grpc -> [gpr, address_sorting, upb, ...]
    """
    bazel_rule = bazel_rules[rule_name]
    direct_deps = _extract_deps(bazel_rule, bazel_rules)
    transitive_deps = set()
    collapsed_deps = set()
    exclude_deps = set()
    collapsed_srcs = set(_extract_sources(bazel_rule))
    collapsed_public_headers = set(_extract_public_headers(bazel_rule))
    collapsed_headers = set(_extract_nonpublic_headers(bazel_rule))
    for dep in direct_deps:
        external_dep_name_maybe = _external_dep_name_from_bazel_dependency(dep)
        if dep in bazel_rules:
            # Descend recursively, but no need to do that for external deps
            if external_dep_name_maybe is None:
                if "_PROCESSING_DONE" not in bazel_rules[dep]:
                    # This item is not processed before, compute now
                    _compute_transitive_metadata(
                        dep, bazel_rules, bazel_label_to_dep_name
                    )
                # Fold the child's already-computed metadata into ours.
                # NOTE(review): passing collapsed_deps as an argument to its
                # own update() is a no-op (updating a set with itself);
                # presumably only the second argument was intended — confirm.
                transitive_deps.update(
                    bazel_rules[dep].get("_TRANSITIVE_DEPS", [])
                )
                collapsed_deps.update(
                    collapsed_deps, bazel_rules[dep].get("_COLLAPSED_DEPS", [])
                )
                exclude_deps.update(bazel_rules[dep].get("_EXCLUDE_DEPS", []))
        # This dep is a public target, add it as a dependency
        if dep in bazel_label_to_dep_name:
            transitive_deps.update([bazel_label_to_dep_name[dep]])
            collapsed_deps.update(
                collapsed_deps, [bazel_label_to_dep_name[dep]]
            )
            # Add all the transitive deps of our every public dep to exclude
            # list since we want to avoid building sources that are already
            # built by our dependencies
            exclude_deps.update(bazel_rules[dep]["_TRANSITIVE_DEPS"])
            continue
        # This dep is an external target, add it as a dependency
        if external_dep_name_maybe is not None:
            transitive_deps.update([external_dep_name_maybe])
            collapsed_deps.update(collapsed_deps, [external_dep_name_maybe])
            continue
    # Direct dependencies are part of transitive dependencies
    transitive_deps.update(direct_deps)
    # Calculate transitive public deps (needed for collapsing sources)
    transitive_public_deps = set(
        [x for x in transitive_deps if x in bazel_label_to_dep_name]
    )
    # Remove intermediate targets that our public dependencies already depend
    # on. This is the step that further shorten the deps list.
    collapsed_deps = set([x for x in collapsed_deps if x not in exclude_deps])
    # Compute the final source files and headers for this build target whose
    # name is `rule_name` (input argument of this function).
    #
    # Imaging a public target PX has transitive deps [IA, IB, PY, IC, PZ]. PX,
    # PY and PZ are public build targets. And IA, IB, IC are intermediate
    # targets. In addition, PY depends on IC.
    #
    # Translate the condition into dependency graph:
    #   PX -> [IA, IB, PY, IC, PZ]
    #   PY -> [IC]
    #   Public targets: [PX, PY, PZ]
    #
    # The collapsed dependencies of PX: [PY, PZ].
    # The excluded dependencies of X: [PY, IC, PZ].
    # (IC is excluded as a dependency of PX. It is already included in PY, hence
    # it would be redundant to include it again.)
    #
    # Target PX should include source files and headers of [PX, IA, IB] as final
    # build metadata.
    for dep in transitive_deps:
        if dep not in exclude_deps and dep not in transitive_public_deps:
            if dep in bazel_rules:
                collapsed_srcs.update(_extract_sources(bazel_rules[dep]))
                collapsed_public_headers.update(
                    _extract_public_headers(bazel_rules[dep])
                )
                collapsed_headers.update(
                    _extract_nonpublic_headers(bazel_rules[dep])
                )
    # This item is a "visited" flag
    bazel_rule["_PROCESSING_DONE"] = True
    # Following items are described in the docstring.
    bazel_rule["_TRANSITIVE_DEPS"] = list(sorted(transitive_deps))
    bazel_rule["_COLLAPSED_DEPS"] = list(sorted(collapsed_deps))
    bazel_rule["_COLLAPSED_SRCS"] = list(sorted(collapsed_srcs))
    bazel_rule["_COLLAPSED_PUBLIC_HEADERS"] = list(
        sorted(collapsed_public_headers)
    )
    bazel_rule["_COLLAPSED_HEADERS"] = list(sorted(collapsed_headers))
    bazel_rule["_EXCLUDE_DEPS"] = list(sorted(exclude_deps))
# TODO(jtattermusch): deduplicate with transitive_dependencies.py (which has a
# slightly different logic)
# TODO(jtattermusch): This is done to avoid introducing too many intermediate
# libraries into the build.yaml-based builds (which might in cause issues
# building language-specific artifacts) and also because the libraries in
# build.yaml-based build are generally considered units of distributions (=
# public libraries that are visible to the user and are installable), while in
# bazel builds it is customary to define larger number of smaller
# "sublibraries". The need for elision (and expansion) of intermediate libraries
# can be re-evaluated in the future.
def _populate_transitive_metadata(
    bazel_rules: Any, public_dep_names: Iterable[str]
) -> None:
    """Annotate every bazel rule with its transitive build metadata."""
    # Map fully-qualified bazel labels back to the public dependency names.
    bazel_label_to_dep_name = {
        _get_bazel_label(dep_name): dep_name for dep_name in public_dep_names
    }
    # Make sure we reached all the Bazel rules.
    # TODO(lidiz) potentially we could only update a subset of rules
    for rule_name, rule in bazel_rules.items():
        if "_PROCESSING_DONE" not in rule:
            _compute_transitive_metadata(
                rule_name, bazel_rules, bazel_label_to_dep_name
            )
def update_test_metadata_with_transitive_metadata(
    all_extra_metadata: BuildDict, bazel_rules: BuildDict
) -> None:
    """Patches test build metadata with transitive metadata."""
    for lib_name, lib_dict in list(all_extra_metadata.items()):
        # Only test targets get patched here; skip everything else.
        if lib_dict.get("_TYPE") != "target" or lib_dict.get("build") != "test":
            continue
        transitive_deps = bazel_rules[_get_bazel_label(lib_name)][
            "_TRANSITIVE_DEPS"
        ]
        if "//external:benchmark" in transitive_deps:
            lib_dict["benchmark"] = True
            lib_dict["defaults"] = "benchmark"
        if "//external:gtest" in transitive_deps:
            lib_dict["gtest"] = True
            lib_dict["language"] = "c++"
def _get_transitive_protos(bazel_rules, t):
que = [
t,
]
visited = set()
ret = []
while que:
name = que.pop(0)
rule = bazel_rules.get(name, None)
if rule:
for dep in rule["deps"]:
if dep not in visited:
visited.add(dep)
que.append(dep)
for src in rule["srcs"]:
if src.endswith(".proto"):
ret.append(src)
return list(set(ret))
def _expand_upb_proto_library_rules(bazel_rules):
    """Rewrites upb proto-library rules in place to use pre-generated files.

    For every rule produced by grpc_upb_proto_library /
    grpc_upb_proto_reflection_library, replaces its "srcs"/"hdrs" with the
    checked-in .upb(.c/.h) or .upbdefs(.c/.h) files derived from the rule's
    transitive .proto sources, and replaces "deps" with the upb runtime
    targets. Raises on rules that don't match the expected shape.
    """
    # Expand the .proto files from UPB proto library rules into the pre-generated
    # upb.h and upb.c files.
    GEN_UPB_ROOT = "//:src/core/ext/upb-generated/"
    GEN_UPBDEFS_ROOT = "//:src/core/ext/upbdefs-generated/"
    # (workspace label prefix, extra path component to strip after it)
    EXTERNAL_LINKS = [
        ("@com_google_protobuf//", "src/"),
        ("@com_google_googleapis//", ""),
        ("@com_github_cncf_udpa//", ""),
        ("@com_envoyproxy_protoc_gen_validate//", ""),
        ("@envoy_api//", ""),
        ("@opencensus_proto//", ""),
    ]
    for name, bazel_rule in bazel_rules.items():
        gen_func = bazel_rule.get("generator_function", None)
        if gen_func in (
            "grpc_upb_proto_library",
            "grpc_upb_proto_reflection_library",
        ):
            # get proto dependency
            deps = bazel_rule["deps"]
            if len(deps) != 1:
                raise Exception(
                    'upb rule "{0}" should have 1 proto dependency but has'
                    ' "{1}"'.format(name, deps)
                )
            # deps is not properly fetched from bazel query for upb_proto_library target
            # so add the upb dependency manually
            bazel_rule["deps"] = [
                "//external:upb_lib",
                "//external:upb_lib_descriptor",
                "//external:upb_generated_code_support__only_for_generated_code_do_not_use__i_give_permission_to_break_me",
            ]
            # populate the upb_proto_library rule with pre-generated upb headers
            # and sources using proto_rule
            protos = _get_transitive_protos(bazel_rules, deps[0])
            if len(protos) == 0:
                raise Exception(
                    'upb rule "{0}" should have at least one proto file.'.format(
                        name
                    )
                )
            srcs = []
            hdrs = []
            for proto_src in protos:
                # Strip the external workspace prefix (plus any extra path
                # component, e.g. protobuf's "src/") from the proto label.
                for external_link in EXTERNAL_LINKS:
                    if proto_src.startswith(external_link[0]):
                        prefix_to_strip = external_link[0] + external_link[1]
                        if not proto_src.startswith(prefix_to_strip):
                            raise Exception(
                                'Source file "{0}" in upb rule {1} does not'
                                ' have the expected prefix "{2}"'.format(
                                    proto_src, name, prefix_to_strip
                                )
                            )
                        proto_src = proto_src[len(prefix_to_strip) :]
                        break
                # A label still starting with "@" comes from a workspace not
                # listed in EXTERNAL_LINKS.
                if proto_src.startswith("@"):
                    raise Exception('"{0}" is unknown workspace.'.format(name))
                proto_src = _extract_source_file_path(proto_src)
                # Pick .upb vs .upbdefs naming based on which macro
                # generated the rule.
                ext = (
                    ".upb"
                    if gen_func == "grpc_upb_proto_library"
                    else ".upbdefs"
                )
                root = (
                    GEN_UPB_ROOT
                    if gen_func == "grpc_upb_proto_library"
                    else GEN_UPBDEFS_ROOT
                )
                srcs.append(root + proto_src.replace(".proto", ext + ".c"))
                hdrs.append(root + proto_src.replace(".proto", ext + ".h"))
            bazel_rule["srcs"] = srcs
            bazel_rule["hdrs"] = hdrs
def _generate_build_metadata(
    build_extra_metadata: BuildDict, bazel_rules: BuildDict
) -> BuildDict:
    """Generate build.yaml-like metadata from bazel rules merged with the
    build.yaml-specific "extra metadata".

    Only targets listed in build_extra_metadata are emitted; extra-metadata
    values override the bazel-derived ones. Targets carrying a "_RENAME"
    key are renamed afterwards, with all deps lists rewritten to match.
    """
    lib_names = list(build_extra_metadata.keys())
    result = {}
    for lib_name in lib_names:
        lib_dict = _create_target_from_bazel_rule(lib_name, bazel_rules)
        # populate extra properties from the build.yaml-specific "extra metadata"
        lib_dict.update(build_extra_metadata.get(lib_name, {}))
        # store to results
        result[lib_name] = lib_dict
    # Rename targets marked with "_RENAME" extra metadata.
    # This is mostly a cosmetic change to ensure that we end up with build.yaml target
    # names we're used to from the past (and also to avoid too long target names).
    # The rename step needs to be made after we're done with most of processing logic
    # otherwise the already-renamed libraries will have different names than expected
    for lib_name in lib_names:
        to_name = build_extra_metadata.get(lib_name, {}).get("_RENAME", None)
        if to_name:
            # store lib under the new name and also change its 'name' property
            if to_name in result:
                raise Exception(
                    "Cannot rename target "
                    + str(lib_name)
                    + ", "
                    + str(to_name)
                    + " already exists."
                )
            lib_dict = result.pop(lib_name)
            lib_dict["name"] = to_name
            result[to_name] = lib_dict
            # dep names need to be updated as well
            for lib_dict_to_update in list(result.values()):
                lib_dict_to_update["deps"] = list(
                    [
                        to_name if dep == lib_name else dep
                        for dep in lib_dict_to_update["deps"]
                    ]
                )
    return result
def _convert_to_build_yaml_like(lib_dict: BuildMetadata) -> BuildYaml:
    """Reshape the name->metadata map into a build.yaml-like dictionary.

    Entries are bucketed by "_TYPE" ("library" when absent), private
    "_"-prefixed scratch fields are stripped, and targets/tests lose their
    "public_headers" field (it makes no sense for them). Note the entry
    dicts are mutated in place.
    """

    def _entries_of_type(wanted_type):
        # Preserve lib_dict's insertion order within each bucket.
        return [
            lib_dict[name]
            for name in list(lib_dict.keys())
            if lib_dict[name].get("_TYPE", "library") == wanted_type
        ]

    def _strip_private_fields(entry):
        # Drop temporary "_"-prefixed fields used during processing.
        for field in [k for k in list(entry.keys()) if k.startswith("_")]:
            entry.pop(field, None)

    lib_list = _entries_of_type("library")
    target_list = _entries_of_type("target")
    test_list = _entries_of_type("test")
    for lib in lib_list:
        _strip_private_fields(lib)
    for target in target_list:
        _strip_private_fields(target)
        target.pop("public_headers", None)  # meaningless for targets
    for test in test_list:
        _strip_private_fields(test)
        test.pop("public_headers", None)  # meaningless for tests
    return {
        "libs": lib_list,
        "filegroups": [],
        "targets": target_list,
        "tests": test_list,
    }
def _extract_cc_tests(bazel_rules: BuildDict) -> List[str]:
    """Return sorted names of all cc_test rules, without the "//" prefix."""
    return sorted(
        rule["name"][len("//") :]
        for rule in list(bazel_rules.values())
        if rule["class"] == "cc_test" and rule["name"].startswith("//")
    )
def _exclude_unwanted_cc_tests(tests: List[str]) -> List[str]:
"""Filters out bazel tests that we don't want to run with other build systems or we cannot build them reasonably"""
# most qps tests are autogenerated, we are fine without them
tests = [test for test in tests if not test.startswith("test/cpp/qps:")]
# microbenchmarks aren't needed for checking correctness
tests = [
test
for test in tests
if not test.startswith("test/cpp/microbenchmarks:")
]
tests = [
test
for test in tests
if not test.startswith("test/core/promise/benchmark:")
]
# we have trouble with census dependency outside of bazel
tests = [
test
for test in tests
if not test.startswith("test/cpp/ext/filters/census:")
and not test.startswith("test/core/xds:xds_channel_stack_modifier_test")
and not test.startswith("test/cpp/ext/gcp:")
and not test.startswith("test/cpp/ext/filters/logging:")
and not test.startswith("test/cpp/interop:observability_interop")
]
# we have not added otel dependency outside of bazel
tests = [
test for test in tests if not test.startswith("test/cpp/ext/otel:")
]
# missing opencensus/stats/stats.h
tests = [
test
for test in tests
if not test.startswith(
"test/cpp/end2end:server_load_reporting_end2end_test"
)
]
tests = [
test
for test in tests
if not test.startswith(
"test/cpp/server/load_reporter:lb_load_reporter_test"
)
]
# The test uses --running_under_bazel cmdline argument
# To avoid the trouble needing to adjust it, we just skip the test
tests = [
test
for test in tests
if not test.startswith(
"test/cpp/naming:resolver_component_tests_runner_invoker"
)
]
# the test requires 'client_crash_test_server' to be built
tests = [
test
for test in tests
if not test.startswith("test/cpp/end2end:time_change_test")
]
# the test requires 'client_crash_test_server' to be built
tests = [
test
for test in tests
if not test.startswith("test/cpp/end2end:client_crash_test")
]
# the test requires 'server_crash_test_client' to be built
tests = [
test
for test in tests
if not test.startswith("test/cpp/end2end:server_crash_test")
]
# test never existed under build.yaml and it fails -> skip it
tests = [
test
for test in tests
if not test.startswith("test/core/tsi:ssl_session_cache_test")
]
# the binary of this test does not get built with cmake
tests = [
test
for test in tests
if not test.startswith("test/cpp/util:channelz_sampler_test")
]
# we don't need to generate fuzzers outside of bazel
tests = [test for test in tests if not test.endswith("_fuzzer")]
return tests
def _generate_build_extra_metadata_for_tests(
    tests: List[str], bazel_rules: BuildDict
) -> BuildDict:
    """For given tests, generate the "extra metadata" that we need for our "build.yaml"-like output. The extra metadata is generated from the bazel rule metadata by using a bunch of heuristics.

    Args:
        tests: bazel test targets, e.g. "test/cpp/foo:bar_test".
        bazel_rules: map of bazel label -> rule metadata dict (tags, args, ...).

    Returns:
        Map of test target -> extra-metadata dict ("_TYPE", "_RENAME", ...).
    """
    # Tags that restrict platform compatibility; loop-invariant, so build once.
    known_platform_tags = frozenset(("no_windows", "no_mac"))
    test_metadata = {}
    for test in tests:
        test_dict = {"build": "test", "_TYPE": "target"}
        bazel_rule = bazel_rules[_get_bazel_label(test)]
        bazel_tags = bazel_rule["tags"]
        if "manual" in bazel_tags:
            # don't run the tests marked as "manual"
            test_dict["run"] = False
        if bazel_rule["flaky"]:
            # don't run tests that are marked as "flaky" under bazel
            # because that would only add noise for the run_tests.py tests
            # and seeing more failures for tests that we already know are flaky
            # doesn't really help anything
            test_dict["run"] = False
        if "no_uses_polling" in bazel_tags:
            test_dict["uses_polling"] = False
        if "grpc_fuzzer" == bazel_rule["generator_function"]:
            # currently we hand-list fuzzers instead of generating them automatically
            # because there's no way to obtain maxlen property from bazel BUILD file.
            print(("skipping fuzzer " + test))
            continue
        if "bazel_only" in bazel_tags:
            continue
        # if any tags that restrict platform compatibility are present,
        # generate the "platforms" field accordingly
        # TODO(jtattermusch): there is also a "no_linux" tag, but we cannot take
        # it into account as it is applied by grpc_cc_test when poller expansion
        # is made (for tests where uses_polling=True). So for now, we just
        # assume all tests are compatible with linux and ignore the "no_linux" tag
        # completely.
        if known_platform_tags.intersection(bazel_tags):
            # assume all tests are compatible with linux and posix
            # (there is no posix-specific tag in bazel BUILD)
            platforms = ["linux", "posix"]
            if "no_mac" not in bazel_tags:
                platforms.append("mac")
            if "no_windows" not in bazel_tags:
                platforms.append("windows")
            test_dict["platforms"] = platforms
        cmdline_args = bazel_rule["args"]
        if cmdline_args:
            test_dict["args"] = list(cmdline_args)
        if test.startswith("test/cpp"):
            test_dict["language"] = "c++"
        elif test.startswith("test/core"):
            test_dict["language"] = "c"
        else:
            # bug fix: add the missing separator so the message reads
            # "wrong test <name>" instead of "wrong test<name>"
            raise Exception("wrong test " + test)
        # short test name without the path.
        # There can be name collisions, but we will resolve them later
        simple_test_name = os.path.basename(_extract_source_file_path(test))
        test_dict["_RENAME"] = simple_test_name
        test_metadata[test] = test_dict
    # detect duplicate test names
    tests_by_simple_name = {}
    for test_name, test_dict in list(test_metadata.items()):
        simple_test_name = test_dict["_RENAME"]
        tests_by_simple_name.setdefault(simple_test_name, []).append(test_name)
    # choose alternative names for tests with a name collision
    for collision_list in tests_by_simple_name.values():
        if len(collision_list) > 1:
            for test_name in collision_list:
                long_name = test_name.replace("/", "_").replace(":", "_")
                print(
                    'short name of "%s" collides with another test, renaming'
                    " to %s" % (test_name, long_name)
                )
                test_metadata[test_name]["_RENAME"] = long_name
    return test_metadata
def _parse_http_archives(xml_tree: ET.Element) -> "List[ExternalProtoLibrary]":
    """Parse Bazel http_archive rule into ExternalProtoLibrary objects."""
    libraries = []
    for rule_xml in xml_tree:
        is_http_archive = (
            rule_xml.tag == "rule"
            and rule_xml.attrib["class"] == "http_archive"
        )
        if not is_http_archive:
            continue
        # Distill the rule's XML attributes into a plain dictionary first.
        attrs = {}
        for node in rule_xml:
            attr_name = node.attrib["name"]
            if attr_name == "name":
                attrs["name"] = node.attrib["value"]
            elif attr_name == "urls":
                attrs["urls"] = [child.attrib["value"] for child in node]
            elif attr_name == "url":
                attrs["urls"] = [node.attrib["value"]]
            elif attr_name == "sha256":
                attrs["hash"] = node.attrib["value"]
            elif attr_name == "strip_prefix":
                attrs["strip_prefix"] = node.attrib["value"]
        if attrs["name"] not in EXTERNAL_PROTO_LIBRARIES:
            # If this http archive is not one of the external proto libraries,
            # we don't want to include it as a CMake target
            continue
        lib = EXTERNAL_PROTO_LIBRARIES[attrs["name"]]
        lib.urls = attrs["urls"]
        lib.hash = attrs["hash"]
        lib.strip_prefix = attrs["strip_prefix"]
        libraries.append(lib)
    return libraries
def _generate_external_proto_libraries() -> List[Dict[str, Any]]:
    """Generates the build metadata for external proto libraries"""
    xml_tree = _bazel_query_xml_tree("kind(http_archive, //external:*)")
    # Sort by destination path so the generated output is deterministic.
    libraries = sorted(
        _parse_http_archives(xml_tree), key=lambda lib: lib.destination
    )
    return [lib.__dict__ for lib in libraries]
def _detect_and_print_issues(build_yaml_like: BuildYaml) -> None:
    """Try detecting some unusual situations and warn about them."""
    for target in build_yaml_like["targets"]:
        if target["build"] != "test":
            continue
        for src in target["src"]:
            # Warn about tests that pull in non-proto files from "src/":
            # tests are not supposed to depend on library internals directly.
            if src.startswith("src/") and not src.endswith(".proto"):
                print(
                    (
                        'source file from under "src/" tree used in test '
                        + target["name"]
                        + ": "
                        + src
                    )
                )
# extra metadata that will be used to construct build.yaml
# there are mostly extra properties that we weren't able to obtain from the bazel build
# _TYPE: whether this is library, target or test
# _RENAME: whether this target should be renamed to a different name (to match expectations of make and cmake builds)
# Keys are bazel-style target labels (or bare library names for top-level
# targets); values are the hand-maintained extra metadata dicts merged with
# the bazel-derived metadata later in this script.
_BUILD_EXTRA_METADATA = {
    "third_party/address_sorting:address_sorting": {
        "language": "c",
        "build": "all",
        "_RENAME": "address_sorting",
    },
    "gpr": {
        "language": "c",
        "build": "all",
    },
    "grpc": {
        "language": "c",
        "build": "all",
        "baselib": True,
        "generate_plugin_registry": True,
    },
    "grpc++": {
        "language": "c++",
        "build": "all",
        "baselib": True,
    },
    "grpc++_alts": {"language": "c++", "build": "all", "baselib": True},
    "grpc++_error_details": {"language": "c++", "build": "all"},
    "grpc++_reflection": {"language": "c++", "build": "all"},
    "grpc_authorization_provider": {"language": "c++", "build": "all"},
    "grpc++_unsecure": {
        "language": "c++",
        "build": "all",
        "baselib": True,
    },
    "grpc_unsecure": {
        "language": "c",
        "build": "all",
        "baselib": True,
        "generate_plugin_registry": True,
    },
    "grpcpp_channelz": {"language": "c++", "build": "all"},
    "grpc++_test": {
        "language": "c++",
        "build": "private",
    },
    # protoc plugins ("build": "protoc") below are all renamed to drop the
    # "src/compiler:" package prefix from the generated target name.
    "src/compiler:grpc_plugin_support": {
        "language": "c++",
        "build": "protoc",
        "_RENAME": "grpc_plugin_support",
    },
    "src/compiler:grpc_cpp_plugin": {
        "language": "c++",
        "build": "protoc",
        "_TYPE": "target",
        "_RENAME": "grpc_cpp_plugin",
    },
    "src/compiler:grpc_csharp_plugin": {
        "language": "c++",
        "build": "protoc",
        "_TYPE": "target",
        "_RENAME": "grpc_csharp_plugin",
    },
    "src/compiler:grpc_node_plugin": {
        "language": "c++",
        "build": "protoc",
        "_TYPE": "target",
        "_RENAME": "grpc_node_plugin",
    },
    "src/compiler:grpc_objective_c_plugin": {
        "language": "c++",
        "build": "protoc",
        "_TYPE": "target",
        "_RENAME": "grpc_objective_c_plugin",
    },
    "src/compiler:grpc_php_plugin": {
        "language": "c++",
        "build": "protoc",
        "_TYPE": "target",
        "_RENAME": "grpc_php_plugin",
    },
    "src/compiler:grpc_python_plugin": {
        "language": "c++",
        "build": "protoc",
        "_TYPE": "target",
        "_RENAME": "grpc_python_plugin",
    },
    "src/compiler:grpc_ruby_plugin": {
        "language": "c++",
        "build": "protoc",
        "_TYPE": "target",
        "_RENAME": "grpc_ruby_plugin",
    },
    # TODO(jtattermusch): consider adding grpc++_core_stats
    # test support libraries
    "test/core/util:grpc_test_util": {
        "language": "c",
        "build": "private",
        "_RENAME": "grpc_test_util",
    },
    "test/core/util:grpc_test_util_unsecure": {
        "language": "c",
        "build": "private",
        "_RENAME": "grpc_test_util_unsecure",
    },
    # TODO(jtattermusch): consider adding grpc++_test_util_unsecure - it doesn't seem to be used by bazel build (don't forget to set secure: False)
    "test/cpp/util:test_config": {
        "language": "c++",
        "build": "private",
        "_RENAME": "grpc++_test_config",
    },
    "test/cpp/util:test_util": {
        "language": "c++",
        "build": "private",
        "_RENAME": "grpc++_test_util",
    },
    # benchmark support libraries
    "test/cpp/microbenchmarks:helpers": {
        "language": "c++",
        "build": "test",
        "defaults": "benchmark",
        "_RENAME": "benchmark_helpers",
    },
    # interop/qps/cli binaries: built as "test" but never run ("run": False)
    # because they are tools invoked manually or by other harnesses.
    "test/cpp/interop:interop_client": {
        "language": "c++",
        "build": "test",
        "run": False,
        "_TYPE": "target",
        "_RENAME": "interop_client",
    },
    "test/cpp/interop:interop_server": {
        "language": "c++",
        "build": "test",
        "run": False,
        "_TYPE": "target",
        "_RENAME": "interop_server",
    },
    "test/cpp/interop:xds_interop_client": {
        "language": "c++",
        "build": "test",
        "run": False,
        "_TYPE": "target",
        "_RENAME": "xds_interop_client",
    },
    "test/cpp/interop:xds_interop_server": {
        "language": "c++",
        "build": "test",
        "run": False,
        "_TYPE": "target",
        "_RENAME": "xds_interop_server",
    },
    "test/cpp/interop:http2_client": {
        "language": "c++",
        "build": "test",
        "run": False,
        "_TYPE": "target",
        "_RENAME": "http2_client",
    },
    "test/cpp/qps:qps_json_driver": {
        "language": "c++",
        "build": "test",
        "run": False,
        "_TYPE": "target",
        "_RENAME": "qps_json_driver",
    },
    "test/cpp/qps:qps_worker": {
        "language": "c++",
        "build": "test",
        "run": False,
        "_TYPE": "target",
        "_RENAME": "qps_worker",
    },
    "test/cpp/util:grpc_cli": {
        "language": "c++",
        "build": "test",
        "run": False,
        "_TYPE": "target",
        "_RENAME": "grpc_cli",
    },
    # TODO(jtattermusch): create_jwt and verify_jwt breaks distribtests because it depends on grpc_test_utils and thus requires tests to be built
    # For now it's ok to disable them as these binaries aren't very useful anyway.
    # 'test/core/security:create_jwt': { 'language': 'c', 'build': 'tool', '_TYPE': 'target', '_RENAME': 'grpc_create_jwt' },
    # 'test/core/security:verify_jwt': { 'language': 'c', 'build': 'tool', '_TYPE': 'target', '_RENAME': 'grpc_verify_jwt' },
    # TODO(jtattermusch): add remaining tools such as grpc_print_google_default_creds_token (they are not used by bazel build)
    # TODO(jtattermusch): these fuzzers had no build.yaml equivalent
    # test/core/compression:message_compress_fuzzer
    # test/core/compression:message_decompress_fuzzer
    # test/core/compression:stream_compression_fuzzer
    # test/core/compression:stream_decompression_fuzzer
    # test/core/slice:b64_decode_fuzzer
    # test/core/slice:b64_encode_fuzzer
}
# We need a complete picture of all the targets and dependencies we're interested in
# so we run multiple bazel queries and merge the results.
# Results from later queries overwrite earlier ones on label collision
# (merged via dict.update below), which is fine because the metadata for the
# same label is identical across queries.
_BAZEL_DEPS_QUERIES = [
    'deps("//test/...")',
    'deps("//:all")',
    'deps("//src/compiler/...")',
    # The ^ is needed to differentiate proto_library from go_proto_library
    'deps(kind("^proto_library", @envoy_api//envoy/...))',
]
# Step 1: run a bunch of "bazel query --output xml" queries to collect
# the raw build metadata from the bazel build.
# At the end of this step we will have a dictionary of bazel rules
# that are interesting to us (libraries, binaries, etc.) along
# with their most important metadata (sources, headers, dependencies)
#
# Example of a single bazel rule after being populated:
# '//:grpc' : { 'class': 'cc_library',
# 'hdrs': ['//:include/grpc/byte_buffer.h', ... ],
# 'srcs': ['//:src/core/lib/surface/init.cc', ... ],
# 'deps': ['//:grpc_common', ...],
# ... }
bazel_rules = {}
for query in _BAZEL_DEPS_QUERIES:
    bazel_rules.update(
        _extract_rules_from_bazel_xml(_bazel_query_xml_tree(query))
    )
# Step 1.5: The sources for UPB protos are pre-generated, so we want
# to expand the UPB proto library bazel rules into the generated
# .upb.h and .upb.c files.
_expand_upb_proto_library_rules(bazel_rules)
# Step 2: Extract the known bazel cc_test tests. While most tests
# will be buildable with other build systems just fine, some of these tests
# would be too difficult to build and run with other build systems,
# so we simply exclude the ones we don't want.
# Note that while making tests buildable with other build systems
# than just bazel is extra effort, we still need to do that for these
# reasons:
# - If our cmake build doesn't have any tests at all, it's hard to make
# sure that what it built actually works (we need at least some "smoke tests").
# This is quite important because the build flags between bazel / non-bazel flag might differ
# (sometimes it's for interesting reasons that are not easy to overcome)
# which makes it even more important to have at least some tests for cmake/make
# - Our portability suite actually runs cmake tests and migration of portability
# suite fully towards bazel might be intricate (e.g. it's unclear whether it's
# possible to get a good enough coverage of different compilers / distros etc.
# with bazel)
# - some things that are considered "tests" in build.yaml-based builds are actually binaries
# we'd want to be able to build anyway (qps_json_worker, interop_client, interop_server, grpc_cli)
# so it's unclear how much make/cmake simplification we would gain by removing just some (but not all) test
# TODO(jtattermusch): Investigate feasibility of running portability suite with bazel.
tests = _exclude_unwanted_cc_tests(_extract_cc_tests(bazel_rules))
# Step 3: Generate the "extra metadata" for all our build targets.
# While the bazel rules give us most of the information we need,
# the legacy "build.yaml" format requires some additional fields that
# we cannot get just from bazel alone (we call that "extra metadata").
# In this step, we basically analyze the build metadata we have from bazel
# and use heuristics to determine (and sometimes guess) the right
# extra metadata to use for each target.
#
# - For some targets (such as the public libraries, helper libraries
# and executables) determining the right extra metadata is hard to do
# automatically. For these targets, the extra metadata is supplied "manually"
# in form of the _BUILD_EXTRA_METADATA dictionary. That allows us to match
# the semantics of the legacy "build.yaml" as closely as possible.
#
# - For test binaries, it is possible to generate the "extra metadata" mostly
# automatically using a rule-based heuristic approach because most tests
# look and behave alike from the build's perspective.
#
# TODO(jtattermusch): Of course neither "_BUILD_EXTRA_METADATA" or
# the heuristic approach used for tests are ideal and they cannot be made
# to cover all possible situations (and are tailored to work with the way
# the grpc build currently works), but the idea was to start with something
# reasonably simple that matches the "build.yaml"-like semantics as closely
# as possible (to avoid changing too many things at once) and gradually get
# rid of the legacy "build.yaml"-specific fields one by one. Once that is done,
# only very little "extra metadata" would be needed and/or it would be trivial
# to generate it automatically.
all_extra_metadata = {}
# NOTE: hand-maintained metadata first, then the auto-generated test metadata
# (test entries would overwrite hand-maintained ones on key collision).
all_extra_metadata.update(_BUILD_EXTRA_METADATA)
all_extra_metadata.update(
    _generate_build_extra_metadata_for_tests(tests, bazel_rules)
)
# Step 4: Compute the build metadata that will be used in the final build.yaml.
# The final build metadata includes transitive dependencies, and sources/headers
# expanded without intermediate dependencies.
# Example:
# '//:grpc' : { ...,
# '_TRANSITIVE_DEPS': ['//:gpr_base', ...],
# '_COLLAPSED_DEPS': ['gpr', ...],
# '_COLLAPSED_SRCS': [...],
# '_COLLAPSED_PUBLIC_HEADERS': [...],
# '_COLLAPSED_HEADERS': [...]
# }
_populate_transitive_metadata(bazel_rules, list(all_extra_metadata.keys()))
# Step 4a: Update the existing test metadata with the updated build metadata.
# Certain build metadata of certain test targets depend on the transitive
# metadata that wasn't available earlier.
update_test_metadata_with_transitive_metadata(all_extra_metadata, bazel_rules)
# Step 5: Generate the final metadata for all the targets.
# This is done by combining the bazel build metadata and the "extra metadata"
# we obtained in the previous step.
# In this step, we also perform some interesting massaging of the target metadata
# to end up with a result that is as similar to the legacy build.yaml data
# as possible.
# - Some targets get renamed (to match the legacy build.yaml target names)
# - Some intermediate libraries get elided ("expanded") to better match the set
# of targets provided by the legacy build.yaml build
#
# Originally the target renaming was introduced to address these concerns:
# - avoid changing too many things at the same time and avoid people getting
# confused by some well know targets suddenly being missing
# - Makefile/cmake and also language-specific generators rely on some build
# targets being called exactly the way they they are. Some of our testing
# scrips also invoke executables (e.g. "qps_json_driver") by their name.
# - The autogenerated test name from bazel includes the package path
# (e.g. "test_cpp_TEST_NAME"). Without renaming, the target names would
# end up pretty ugly (e.g. test_cpp_qps_qps_json_driver).
# TODO(jtattermusch): reevaluate the need for target renaming in the future.
#
# Example of a single generated target:
# 'grpc' : { 'language': 'c',
# 'public_headers': ['include/grpc/byte_buffer.h', ... ],
# 'headers': ['src/core/ext/filters/client_channel/client_channel.h', ... ],
# 'src': ['src/core/lib/surface/init.cc', ... ],
# 'deps': ['gpr', 'address_sorting', ...],
# ... }
all_targets_dict = _generate_build_metadata(all_extra_metadata, bazel_rules)
# Step 6: convert the dictionary with all the targets to a dict that has
# the desired "build.yaml"-like layout.
# TODO(jtattermusch): We use the custom "build.yaml"-like layout because
# currently all other build systems use that format as their source of truth.
# In the future, we can get rid of this custom & legacy format entirely,
# but we would need to update the generators for other build systems
# at the same time.
#
# Layout of the result:
# { 'libs': { TARGET_DICT_FOR_LIB_XYZ, ... },
# 'targets': { TARGET_DICT_FOR_BIN_XYZ, ... },
# 'tests': { TARGET_DICT_FOR_TEST_XYZ, ...} }
build_yaml_like = _convert_to_build_yaml_like(all_targets_dict)
# Step 7: generates build metadata for external ProtoBuf libraries.
# We only want the ProtoBuf sources from these ProtoBuf dependencies, which may
# not be present in our release source tar balls. These rules will be used in CMake
# to download these libraries if not existed. Even if the download failed, it
# will be a soft error that doesn't block existing target from successfully
# built.
build_yaml_like[
    "external_proto_libraries"
] = _generate_external_proto_libraries()
# detect and report some suspicious situations we've seen before
_detect_and_print_issues(build_yaml_like)
# Step 7: Store the build_autogenerated.yaml in a deterministic (=sorted)
# and cleaned-up form.
# NOTE(review): two sections are both labeled "Step 7" — this one is really
# the final step (step 8); renumber when convenient.
# A basic overview of the resulting "build.yaml"-like format is here:
# https://github.com/grpc/grpc/blob/master/templates/README.md
# TODO(jtattermusch): The "cleanup" function is taken from the legacy
# build system (which used build.yaml) and can be eventually removed.
build_yaml_string = build_cleaner.cleaned_build_yaml_dict_as_string(
    build_yaml_like
)
with open("build_autogenerated.yaml", "w") as file:
    file.write(build_yaml_string)
| 53,561
| 39.181545
| 196
|
py
|
grpc
|
grpc-master/tools/buildgen/build_cleaner.py
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# produces cleaner build.yaml files
import collections
import os
import sys
import yaml
# When TEST=true, run in check-only mode: verify the files are already clean
# instead of rewriting them (see the __main__ block below).
TEST = os.environ.get("TEST", "false") == "true"
# Canonical ordering of the top-level keys of a build.yaml file.
_TOP_LEVEL_KEYS = [
    "settings",
    "proto_deps",
    "filegroups",
    "libs",
    "targets",
    "vspackages",
]
# Canonical ordering of the keys within a single element (filegroup/lib/target);
# any keys not listed here are appended afterwards in alphabetical order.
_ELEM_KEYS = [
    "name",
    "gtest",
    "cpu_cost",
    "flaky",
    "build",
    "run",
    "language",
    "public_headers",
    "headers",
    "src",
    "deps",
]
def repr_ordered_dict(dumper, odict):
    """YAML representer that serializes an OrderedDict as a plain mapping,
    preserving the dict's insertion order."""
    items = list(odict.items())
    return dumper.represent_mapping("tag:yaml.org,2002:map", items)
# Register the representer so yaml.dump emits OrderedDicts as ordinary maps.
yaml.add_representer(collections.OrderedDict, repr_ordered_dict)
def _rebuild_as_ordered_dict(indict, special_keys):
outdict = collections.OrderedDict()
for key in sorted(indict.keys()):
if "#" in key:
outdict[key] = indict[key]
for key in special_keys:
if key in indict:
outdict[key] = indict[key]
for key in sorted(indict.keys()):
if key in special_keys:
continue
if "#" in key:
continue
outdict[key] = indict[key]
return outdict
def _clean_elem(indict):
    """Canonicalize one build.yaml element: within each file list, keep
    .proto files first (original order) followed by the rest sorted, then
    reorder the element's keys per _ELEM_KEYS."""
    for list_name in ("public_headers", "headers", "src"):
        if list_name not in indict:
            continue
        files = indict[list_name]
        protos = [f for f in files if os.path.splitext(f)[1] == ".proto"]
        non_protos = set(f for f in files if f not in protos)
        indict[list_name] = protos + sorted(non_protos)
    return _rebuild_as_ordered_dict(indict, _ELEM_KEYS)
def cleaned_build_yaml_dict_as_string(indict):
    """Takes dictionary which represents yaml file and returns the cleaned-up yaml string"""
    js = _rebuild_as_ordered_dict(indict, _TOP_LEVEL_KEYS)
    for group_name in ("filegroups", "libs", "targets"):
        if group_name not in js:
            continue
        # Clean each element, then order by (language, name) for determinism.
        cleaned = [_clean_elem(elem) for elem in js[group_name]]
        cleaned.sort(key=lambda elem: (elem.get("language", "_"), elem["name"]))
        js[group_name] = cleaned
    raw = yaml.dump(js, indent=2, width=80, default_flow_style=False)
    # massage out trailing whitespace
    stripped = [line.rstrip() + "\n" for line in raw.splitlines()]
    return "".join(stripped)
if __name__ == "__main__":
    # Clean every build.yaml file given on the command line.
    for filename in sys.argv[1:]:
        with open(filename) as f:
            js = yaml.safe_load(f)
        output = cleaned_build_yaml_dict_as_string(js)
        if TEST:
            # Check-only mode: fail if the file is not already in cleaned form.
            with open(filename) as f:
                if not f.read() == output:
                    raise Exception(
                        "Looks like build-cleaner.py has not been run for file"
                        ' "%s"?' % filename
                    )
        else:
            # Rewrite the file in place with the cleaned-up contents.
            with open(filename, "w") as f:
                f.write(output)
| 3,369
| 27.083333
| 92
|
py
|
grpc
|
grpc-master/tools/buildgen/_mako_renderer.py
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Mako renderer.
Just a wrapper around the mako rendering library.
"""
import getopt
import glob
import importlib.util
import os
import pickle
import shutil
import sys
from typing import List
from mako import exceptions
from mako.lookup import TemplateLookup
from mako.runtime import Context
from mako.template import Template
import yaml
# Repository root, two levels up from this file (tools/buildgen/).
PROJECT_ROOT = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "..", ".."
)
# TODO(lidiz) find a better way for plugins to reference each other
sys.path.append(os.path.join(PROJECT_ROOT, "tools", "buildgen", "plugins"))
def out(msg: str) -> None:
    """Write a diagnostic message (plus newline) to stderr."""
    sys.stderr.write(msg + "\n")
def showhelp() -> None:
    """Print command-line usage for this script to stderr."""
    usage = (
        "mako-renderer.py [-o out] [-m cache] [-P preprocessed_input] [-d dict]"
        " [-d dict...] [-t template] [-w preprocessed_output]"
    )
    out(usage)
def render_template(template: Template, context: Context) -> None:
    """Render the mako template with given context.

    Prints an error template to indicate where and what in the template caused
    the render failure.
    """
    try:
        template.render_context(context)
    except:
        # Surface mako's detailed error report before re-raising.
        error_report = exceptions.text_error_template().render()
        out(error_report)
        raise
def main(argv: List[str]) -> None:
    """Parse command-line options and render the given mako templates.

    Each positional argument is a YAML file containing either a single mako
    template string, or one or more control documents describing a directory
    of templates to render.
    """
    got_input = False
    module_directory = None
    # NOTE(review): preprocessed_output is never assigned after this — the
    # "-w" flag from the usage string is not parsed, so the final
    # "nothing to do" check effectively only tests got_input. Confirm intent.
    preprocessed_output = None
    dictionary = {}
    # NOTE(review): json_dict is only ever checked to be empty under "-P";
    # it looks like a leftover from a removed "-d" option.
    json_dict = {}
    got_output = False
    output_name = None
    got_preprocessed_input = False
    output_merged = None
    try:
        # NOTE(review): "-t" is accepted by getopt but has no handler below.
        opts, args = getopt.getopt(argv, "hM:m:o:t:P:")
    except getopt.GetoptError:
        out("Unknown option")
        showhelp()
        sys.exit(2)
    for opt, arg in opts:
        if opt == "-h":
            out("Displaying showhelp")
            showhelp()
            sys.exit()
        elif opt == "-o":
            if got_output:
                out("Got more than one output")
                showhelp()
                sys.exit(3)
            got_output = True
            output_name = arg
        elif opt == "-m":
            if module_directory is not None:
                out("Got more than one cache directory")
                showhelp()
                sys.exit(4)
            module_directory = arg
        elif opt == "-M":
            if output_merged is not None:
                out("Got more than one output merged path")
                showhelp()
                sys.exit(5)
            output_merged = arg
        elif opt == "-P":
            assert not got_preprocessed_input
            assert json_dict == {}
            # Load the pickled, pre-merged build dictionary produced by
            # generate_projects.py.
            with open(arg, "rb") as dict_file:
                dictionary = pickle.load(dict_file)
            got_preprocessed_input = True
    cleared_dir = False
    for arg in args:
        got_input = True
        with open(arg) as f:
            srcs = list(yaml.safe_load_all(f.read()))
        for src in srcs:
            if isinstance(src, str):
                # Plain string document: the whole file is one mako template
                # rendered into the single "-o" output file.
                assert len(srcs) == 1
                template = Template(
                    src,
                    filename=arg,
                    module_directory=module_directory,
                    lookup=TemplateLookup(directories=["."]),
                )
                with open(output_name, "w") as output_file:
                    render_template(
                        template, Context(output_file, **dictionary)
                    )
            else:
                # we have optional control data: this template represents
                # a directory
                if not cleared_dir:
                    # Remove any stale output (file or directory) once before
                    # the first item is rendered.
                    if not os.path.exists(output_name):
                        pass
                    elif os.path.isfile(output_name):
                        os.unlink(output_name)
                    else:
                        shutil.rmtree(output_name, ignore_errors=True)
                    cleared_dir = True
                items = []
                if "foreach" in src:
                    # Render one output per element of the named dictionary
                    # entry, optionally filtered by the "cond" expression.
                    for el in dictionary[src["foreach"]]:
                        if "cond" in src:
                            args = dict(dictionary)
                            args["selected"] = el
                            if not eval(src["cond"], {}, args):
                                continue
                        items.append(el)
                    assert items
                else:
                    items = [None]
                for item in items:
                    args = dict(dictionary)
                    args["selected"] = item
                    # "output_name" in the control data is itself a mako
                    # template, evaluated per item.
                    item_output_name = os.path.join(
                        output_name, Template(src["output_name"]).render(**args)
                    )
                    if not os.path.exists(os.path.dirname(item_output_name)):
                        os.makedirs(os.path.dirname(item_output_name))
                    template = Template(
                        src["template"],
                        filename=arg,
                        module_directory=module_directory,
                        lookup=TemplateLookup(directories=["."]),
                    )
                    with open(item_output_name, "w") as output_file:
                        render_template(template, Context(output_file, **args))
    if not got_input and not preprocessed_output:
        out("Got nothing to do")
        showhelp()
if __name__ == "__main__":
    main(sys.argv[1:])
| 5,911
| 31.844444
| 80
|
py
|
grpc
|
grpc-master/tools/buildgen/generate_projects.py
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import multiprocessing
import os
import pickle
import shutil
import sys
import tempfile
from typing import Dict, List, Union
import _utils
import yaml
# Repository root, two levels up from this file (tools/buildgen/); all paths
# below are relative to it.
PROJECT_ROOT = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "..", ".."
)
os.chdir(PROJECT_ROOT)
# TODO(lidiz) find a better way for plugins to reference each other
sys.path.append(os.path.join(PROJECT_ROOT, "tools", "buildgen", "plugins"))
# from tools.run_tests.python_utils import jobset
jobset = _utils.import_python_module(
    os.path.join(
        PROJECT_ROOT, "tools", "run_tests", "python_utils", "jobset.py"
    )
)
# Path of the pickled, merged build dictionary consumed by _mako_renderer.py.
PREPROCESSED_BUILD = ".preprocessed_build"
# In check mode (TEST=true), `test` maps expected output path -> temp file
# with the freshly rendered content, diffed against the checked-in file later.
test = {} if os.environ.get("TEST", "false") == "true" else None
assert sys.argv[1:], "run generate_projects.sh instead of this directly"
parser = argparse.ArgumentParser()
parser.add_argument(
    "build_files",
    nargs="+",
    default=[],
    help="build files describing build specs",
)
parser.add_argument(
    "--templates", nargs="+", default=[], help="mako template files to render"
)
parser.add_argument(
    "--output_merged",
    "-m",
    default="",
    type=str,
    help="merge intermediate results to a file",
)
parser.add_argument(
    "--jobs",
    "-j",
    default=multiprocessing.cpu_count(),
    type=int,
    help="maximum parallel jobs",
)
parser.add_argument(
    "--base", default=".", type=str, help="base path for generated files"
)
args = parser.parse_args()
def preprocess_build_files() -> _utils.Bunch:
    """Merges build yaml into a one dictionary then pass it to plugins."""
    build_spec = {}
    for build_file in args.build_files:
        with open(build_file, "r") as f:
            _utils.merge_json(build_spec, yaml.safe_load(f.read()))
    # Executes plugins. Plugins update the build spec in-place.
    plugin_files = sorted(glob.glob("tools/buildgen/plugins/*.py"))
    for py_file in plugin_files:
        _utils.import_python_module(py_file).mako_plugin(build_spec)
    if args.output_merged:
        with open(args.output_merged, "w") as f:
            f.write(yaml.dump(build_spec))
    # Makes build_spec sort of immutable and dot-accessible
    return _utils.to_bunch(build_spec)
def generate_template_render_jobs(templates: List[str]) -> List[jobset.JobSpec]:
    """Generate JobSpecs for each one of the template rendering work."""
    renderer_cmd = [sys.executable, "tools/buildgen/_mako_renderer.py"]
    jobs = []
    for template in sorted(templates, reverse=True):
        root, filename = os.path.split(template)
        stem, ext = os.path.splitext(filename)
        if ext != ".template":
            continue
        # Strip the leading "templates" component to get the output location.
        out_dir = args.base + root[len("templates") :]
        out = os.path.join(out_dir, stem)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        cmd = renderer_cmd + ["-P", PREPROCESSED_BUILD, "-o"]
        if test is None:
            cmd.append(out)
        else:
            # Check mode: render into a temp file and remember it so main()
            # can diff it against the checked-in output afterwards.
            fd, tmp_path = tempfile.mkstemp()
            test[out] = tmp_path
            os.close(fd)
            cmd.append(tmp_path)
        cmd.append(args.base + "/" + root + "/" + filename)
        jobs.append(jobset.JobSpec(cmd, shortname=out, timeout_seconds=None))
    return jobs
def main() -> None:
    """Preprocess the build files and render all templates in parallel."""
    templates = args.templates
    if not templates:
        # No explicit template list: render everything under templates/.
        for root, _, files in os.walk("templates"):
            for f in files:
                templates.append(os.path.join(root, f))
    build_spec = preprocess_build_files()
    # Persist the merged build spec so each renderer subprocess can load it.
    with open(PREPROCESSED_BUILD, "wb") as f:
        pickle.dump(build_spec, f)
    err_cnt, _ = jobset.run(
        generate_template_render_jobs(templates), maxjobs=args.jobs
    )
    if err_cnt != 0:
        print(
            "ERROR: %s error(s) found while generating projects." % err_cnt,
            file=sys.stderr,
        )
        sys.exit(1)
    if test is not None:
        # Check mode: diff each freshly rendered temp output against the
        # checked-in file, then remove the temp copy.
        for s, g in test.items():
            if os.path.isfile(g):
                assert 0 == os.system("diff %s %s" % (s, g)), s
                os.unlink(g)
            else:
                assert 0 == os.system("diff -r %s %s" % (s, g)), s
                shutil.rmtree(g, ignore_errors=True)
if __name__ == "__main__":
    main()
| 4,880
| 30.490323
| 80
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.